-rw-r--r--.bzrignore11
-rw-r--r--client/mysql.cc59
-rw-r--r--client/mysqldump.c58
-rw-r--r--client/mysqlimport.c45
-rw-r--r--client/mysqlslap.c27
-rw-r--r--client/mysqltest.c86
-rw-r--r--configure.in12
-rw-r--r--extra/yassl/FLOSS-EXCEPTIONS120
-rw-r--r--extra/yassl/README12
-rw-r--r--extra/yassl/include/buffer.hpp4
-rw-r--r--extra/yassl/include/cert_wrapper.hpp4
-rw-r--r--extra/yassl/include/crypto_wrapper.hpp8
-rw-r--r--extra/yassl/include/factory.hpp4
-rw-r--r--extra/yassl/include/handshake.hpp6
-rw-r--r--extra/yassl/include/lock.hpp4
-rw-r--r--extra/yassl/include/log.hpp4
-rw-r--r--extra/yassl/include/openssl/ssl.h12
-rw-r--r--extra/yassl/include/socket_wrapper.hpp4
-rw-r--r--extra/yassl/include/timer.hpp4
-rw-r--r--extra/yassl/include/yassl_error.hpp4
-rw-r--r--extra/yassl/include/yassl_imp.hpp8
-rw-r--r--extra/yassl/include/yassl_int.hpp10
-rw-r--r--extra/yassl/include/yassl_types.hpp4
-rw-r--r--extra/yassl/mySTL/algorithm.hpp4
-rw-r--r--extra/yassl/mySTL/helpers.hpp4
-rw-r--r--extra/yassl/mySTL/list.hpp4
-rw-r--r--extra/yassl/mySTL/memory.hpp4
-rw-r--r--extra/yassl/mySTL/pair.hpp4
-rw-r--r--extra/yassl/mySTL/stdexcept.hpp4
-rw-r--r--extra/yassl/mySTL/vector.hpp4
-rw-r--r--extra/yassl/src/buffer.cpp4
-rw-r--r--extra/yassl/src/cert_wrapper.cpp7
-rw-r--r--extra/yassl/src/crypto_wrapper.cpp19
-rw-r--r--extra/yassl/src/handshake.cpp12
-rw-r--r--extra/yassl/src/lock.cpp4
-rw-r--r--extra/yassl/src/log.cpp4
-rw-r--r--extra/yassl/src/socket_wrapper.cpp4
-rw-r--r--extra/yassl/src/ssl.cpp50
-rw-r--r--extra/yassl/src/template_instnt.cpp4
-rw-r--r--extra/yassl/src/timer.cpp4
-rw-r--r--extra/yassl/src/yassl.cpp4
-rw-r--r--extra/yassl/src/yassl_error.cpp4
-rw-r--r--extra/yassl/src/yassl_imp.cpp22
-rw-r--r--extra/yassl/src/yassl_int.cpp43
-rw-r--r--extra/yassl/taocrypt/include/aes.hpp4
-rw-r--r--extra/yassl/taocrypt/include/algebra.hpp4
-rw-r--r--extra/yassl/taocrypt/include/arc4.hpp4
-rw-r--r--extra/yassl/taocrypt/include/asn.hpp4
-rw-r--r--extra/yassl/taocrypt/include/block.hpp4
-rw-r--r--extra/yassl/taocrypt/include/blowfish.hpp4
-rw-r--r--extra/yassl/taocrypt/include/coding.hpp4
-rw-r--r--extra/yassl/taocrypt/include/des.hpp4
-rw-r--r--extra/yassl/taocrypt/include/dh.hpp4
-rw-r--r--extra/yassl/taocrypt/include/dsa.hpp4
-rw-r--r--extra/yassl/taocrypt/include/error.hpp4
-rw-r--r--extra/yassl/taocrypt/include/file.hpp4
-rw-r--r--extra/yassl/taocrypt/include/hash.hpp4
-rw-r--r--extra/yassl/taocrypt/include/hmac.hpp4
-rw-r--r--extra/yassl/taocrypt/include/integer.hpp4
-rw-r--r--extra/yassl/taocrypt/include/kernelc.hpp4
-rw-r--r--extra/yassl/taocrypt/include/md2.hpp4
-rw-r--r--extra/yassl/taocrypt/include/md4.hpp4
-rw-r--r--extra/yassl/taocrypt/include/md5.hpp4
-rw-r--r--extra/yassl/taocrypt/include/misc.hpp4
-rw-r--r--extra/yassl/taocrypt/include/modarith.hpp4
-rw-r--r--extra/yassl/taocrypt/include/modes.hpp4
-rw-r--r--extra/yassl/taocrypt/include/pwdbased.hpp4
-rw-r--r--extra/yassl/taocrypt/include/random.hpp4
-rw-r--r--extra/yassl/taocrypt/include/ripemd.hpp4
-rw-r--r--extra/yassl/taocrypt/include/rsa.hpp4
-rw-r--r--extra/yassl/taocrypt/include/runtime.hpp4
-rw-r--r--extra/yassl/taocrypt/include/sha.hpp4
-rw-r--r--extra/yassl/taocrypt/include/twofish.hpp4
-rw-r--r--extra/yassl/taocrypt/include/type_traits.hpp4
-rw-r--r--extra/yassl/taocrypt/include/types.hpp4
-rw-r--r--extra/yassl/taocrypt/src/aes.cpp4
-rw-r--r--extra/yassl/taocrypt/src/aestables.cpp4
-rw-r--r--extra/yassl/taocrypt/src/algebra.cpp4
-rw-r--r--extra/yassl/taocrypt/src/arc4.cpp4
-rw-r--r--extra/yassl/taocrypt/src/asn.cpp15
-rw-r--r--extra/yassl/taocrypt/src/bftables.cpp4
-rw-r--r--extra/yassl/taocrypt/src/blowfish.cpp4
-rw-r--r--extra/yassl/taocrypt/src/coding.cpp4
-rw-r--r--extra/yassl/taocrypt/src/des.cpp4
-rw-r--r--extra/yassl/taocrypt/src/dh.cpp4
-rw-r--r--extra/yassl/taocrypt/src/dsa.cpp4
-rw-r--r--extra/yassl/taocrypt/src/file.cpp4
-rw-r--r--extra/yassl/taocrypt/src/hash.cpp4
-rw-r--r--extra/yassl/taocrypt/src/integer.cpp4
-rw-r--r--extra/yassl/taocrypt/src/md2.cpp4
-rw-r--r--extra/yassl/taocrypt/src/md4.cpp4
-rw-r--r--extra/yassl/taocrypt/src/md5.cpp4
-rw-r--r--extra/yassl/taocrypt/src/misc.cpp4
-rw-r--r--extra/yassl/taocrypt/src/random.cpp4
-rw-r--r--extra/yassl/taocrypt/src/ripemd.cpp4
-rw-r--r--extra/yassl/taocrypt/src/rsa.cpp4
-rw-r--r--extra/yassl/taocrypt/src/sha.cpp4
-rw-r--r--extra/yassl/taocrypt/src/template_instnt.cpp4
-rw-r--r--extra/yassl/taocrypt/src/tftables.cpp4
-rw-r--r--extra/yassl/taocrypt/src/twofish.cpp4
-rw-r--r--extra/yassl/yassl.dsp8
-rw-r--r--include/my_base.h12
-rw-r--r--include/my_global.h4
-rw-r--r--include/my_handler.h1
-rw-r--r--include/my_sys.h4
-rw-r--r--include/mysql.h17
-rw-r--r--include/sql_common.h4
-rw-r--r--libmysql/libmysql.c24
-rw-r--r--libmysqld/CMakeLists.txt7
-rw-r--r--libmysqld/Makefile.am2
-rw-r--r--libmysqld/embedded_priv.h6
-rw-r--r--libmysqld/lib_sql.cc30
-rw-r--r--libmysqld/libmysqld.c54
-rw-r--r--mysql-test/Makefile.am1
-rw-r--r--mysql-test/extra/binlog_tests/binlog.test32
-rw-r--r--mysql-test/extra/binlog_tests/blackhole.test10
-rw-r--r--mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test27
-rw-r--r--mysql-test/extra/rpl_tests/rpl_auto_increment.test40
-rw-r--r--mysql-test/extra/rpl_tests/rpl_insert_id.test140
-rw-r--r--mysql-test/extra/rpl_tests/rpl_insert_id_pk.test3
-rw-r--r--mysql-test/extra/rpl_tests/rpl_loaddata.test5
-rw-r--r--mysql-test/extra/rpl_tests/rpl_multi_update3.test2
-rw-r--r--mysql-test/extra/rpl_tests/rpl_row_sp006.test1
-rw-r--r--mysql-test/include/check-testcase.test4
-rw-r--r--mysql-test/include/ndb_default_cluster.inc2
-rw-r--r--mysql-test/lib/mtr_cases.pl194
-rw-r--r--mysql-test/lib/mtr_process.pl487
-rw-r--r--mysql-test/lib/mtr_report.pl44
-rw-r--r--mysql-test/lib/mtr_stress.pl5
-rwxr-xr-xmysql-test/mysql-test-run.pl1741
-rw-r--r--mysql-test/mysql-test-run.sh7
-rw-r--r--mysql-test/r/analyse.result24
-rw-r--r--mysql-test/r/archive.result2
-rw-r--r--mysql-test/r/auto_increment.result60
-rw-r--r--mysql-test/r/bigint.result2
-rw-r--r--mysql-test/r/binlog_row_binlog.result34
-rw-r--r--mysql-test/r/binlog_row_blackhole.result9
-rw-r--r--mysql-test/r/binlog_row_mix_innodb_myisam.result304
-rw-r--r--mysql-test/r/binlog_stm_binlog.result32
-rw-r--r--mysql-test/r/binlog_stm_blackhole.result14
-rw-r--r--mysql-test/r/binlog_stm_mix_innodb_myisam.result190
-rw-r--r--mysql-test/r/create.result8
-rw-r--r--mysql-test/r/create_not_windows.result17
-rw-r--r--mysql-test/r/ctype_ucs2_def.result3
-rw-r--r--mysql-test/r/date_formats.result22
-rw-r--r--mysql-test/r/events_logs_tests.result2
-rw-r--r--mysql-test/r/federated.result134
-rw-r--r--mysql-test/r/func_compress.result4
-rw-r--r--mysql-test/r/func_gconcat.result13
-rw-r--r--mysql-test/r/func_group.result20
-rw-r--r--mysql-test/r/func_group_innodb.result147
-rw-r--r--mysql-test/r/func_math.result2
-rw-r--r--mysql-test/r/func_sapdb.result6
-rw-r--r--mysql-test/r/func_str.result46
-rw-r--r--mysql-test/r/func_system.result2
-rw-r--r--mysql-test/r/func_time.result10
-rw-r--r--mysql-test/r/func_timestamp.result2
-rw-r--r--mysql-test/r/gis-rtree.result40
-rw-r--r--mysql-test/r/gis.result10
-rw-r--r--mysql-test/r/group_by.result25
-rw-r--r--mysql-test/r/group_min_max.result43
-rw-r--r--mysql-test/r/group_min_max_innodb.result72
-rw-r--r--mysql-test/r/information_schema.result158
-rw-r--r--mysql-test/r/information_schema_part.result48
-rw-r--r--mysql-test/r/init_connect.result114
-rw-r--r--mysql-test/r/init_file.result15
-rw-r--r--mysql-test/r/innodb.result4
-rw-r--r--mysql-test/r/innodb_mysql.result35
-rw-r--r--mysql-test/r/insert.result15
-rw-r--r--mysql-test/r/join_outer.result19
-rw-r--r--mysql-test/r/join_outer_innodb.result19
-rw-r--r--mysql-test/r/lock.result7
-rw-r--r--mysql-test/r/lock_multi.result31
-rw-r--r--mysql-test/r/log_state.result2
-rw-r--r--mysql-test/r/lowercase_fs_off.result11
-rw-r--r--mysql-test/r/merge.result8
-rw-r--r--mysql-test/r/myisam.result16
-rw-r--r--mysql-test/r/mysql.result8
-rw-r--r--mysql-test/r/mysql_client.result4
-rw-r--r--mysql-test/r/mysqldump-max.result12
-rw-r--r--mysql-test/r/mysqldump.result337
-rw-r--r--mysql-test/r/mysqltest.result8
-rw-r--r--mysql-test/r/ndb_binlog_discover.result1
-rw-r--r--mysql-test/r/ndb_cache_multi.result2
-rw-r--r--mysql-test/r/ndb_dd_advance.result1088
-rw-r--r--mysql-test/r/ndb_dd_advance2.result746
-rw-r--r--mysql-test/r/ndb_dd_backuprestore.result60
-rw-r--r--mysql-test/r/ndb_default_cluster.require2
-rw-r--r--mysql-test/r/ndb_partition_key.result8
-rw-r--r--mysql-test/r/ndb_partition_range.result6
-rw-r--r--mysql-test/r/ndb_replace.result45
-rw-r--r--mysql-test/r/ndb_restore.result37
-rw-r--r--mysql-test/r/ndb_trigger.result119
-rw-r--r--mysql-test/r/odbc.result11
-rw-r--r--mysql-test/r/olap.result4
-rw-r--r--mysql-test/r/partition.result70
-rw-r--r--mysql-test/r/partition_hash.result84
-rw-r--r--mysql-test/r/partition_innodb.result112
-rw-r--r--mysql-test/r/partition_list.result94
-rw-r--r--mysql-test/r/partition_mgm.result12
-rw-r--r--mysql-test/r/partition_order.result56
-rw-r--r--mysql-test/r/partition_pruning.result78
-rw-r--r--mysql-test/r/partition_range.result190
-rw-r--r--mysql-test/r/ps.result119
-rw-r--r--mysql-test/r/ps_2myisam.result4
-rw-r--r--mysql-test/r/ps_3innodb.result4
-rw-r--r--mysql-test/r/ps_4heap.result4
-rw-r--r--mysql-test/r/ps_5merge.result8
-rw-r--r--mysql-test/r/ps_6bdb.result4
-rw-r--r--mysql-test/r/ps_7ndb.result4
-rw-r--r--mysql-test/r/query_cache.result2
-rw-r--r--mysql-test/r/range.result21
-rw-r--r--mysql-test/r/rpl_auto_increment.result44
-rw-r--r--mysql-test/r/rpl_drop_db.result2
-rw-r--r--mysql-test/r/rpl_get_lock.result2
-rw-r--r--mysql-test/r/rpl_insert.result16
-rw-r--r--mysql-test/r/rpl_insert_id.result138
-rw-r--r--mysql-test/r/rpl_insert_id_pk.result1
-rw-r--r--mysql-test/r/rpl_loaddata.result13
-rw-r--r--mysql-test/r/rpl_master_pos_wait.result2
-rw-r--r--mysql-test/r/rpl_multi_update3.result1
-rw-r--r--mysql-test/r/rpl_ndb_auto_inc.result8
-rw-r--r--mysql-test/r/rpl_ndb_multi_update3.result1
-rw-r--r--mysql-test/r/rpl_ndb_sp006.result1
-rw-r--r--mysql-test/r/rpl_row_create_table.result21
-rw-r--r--mysql-test/r/rpl_row_delayed_ins.result6
-rw-r--r--mysql-test/r/rpl_row_sp006_InnoDB.result1
-rw-r--r--mysql-test/r/rpl_stm_no_op.result2
-rw-r--r--mysql-test/r/rpl_switch_stm_row_mixed.result536
-rw-r--r--mysql-test/r/rpl_temporary.result10
-rw-r--r--mysql-test/r/rpl_variables.result1
-rw-r--r--mysql-test/r/select.result84
-rw-r--r--mysql-test/r/show_check.result62
-rw-r--r--mysql-test/r/sp-error.result2
-rw-r--r--mysql-test/r/sp-security.result41
-rw-r--r--mysql-test/r/sp-vars.result15
-rw-r--r--mysql-test/r/sp.result83
-rw-r--r--mysql-test/r/sp_notembedded.result25
-rw-r--r--mysql-test/r/sp_trans.result26
-rw-r--r--mysql-test/r/strict.result46
-rw-r--r--mysql-test/r/subselect.result162
-rw-r--r--mysql-test/r/subselect2.result12
-rw-r--r--mysql-test/r/symlink.result6
-rw-r--r--mysql-test/r/trigger.result14
-rw-r--r--mysql-test/r/type_blob.result2
-rw-r--r--mysql-test/r/type_newdecimal.result10
-rw-r--r--mysql-test/r/type_ranges.result4
-rw-r--r--mysql-test/r/type_timestamp.result2
-rw-r--r--mysql-test/r/udf.result24
-rw-r--r--mysql-test/r/union.result31
-rw-r--r--mysql-test/r/variables.result28
-rw-r--r--mysql-test/r/view.result100
-rw-r--r--mysql-test/r/view_grant.result53
-rw-r--r--mysql-test/r/wait_timeout.result4
-rw-r--r--mysql-test/std_data/init_file.dat28
-rw-r--r--mysql-test/t/auto_increment.test47
-rw-r--r--mysql-test/t/bdb.test1
-rw-r--r--mysql-test/t/create.test3
-rw-r--r--mysql-test/t/create_not_windows.test21
-rw-r--r--mysql-test/t/ctype_ucs2_def-master.opt2
-rw-r--r--mysql-test/t/ctype_ucs2_def.test5
-rw-r--r--mysql-test/t/date_formats.test22
-rw-r--r--mysql-test/t/disabled.def13
-rw-r--r--mysql-test/t/events_logs_tests.test2
-rw-r--r--mysql-test/t/federated.test123
-rw-r--r--mysql-test/t/func_gconcat.test14
-rw-r--r--mysql-test/t/func_group.test36
-rw-r--r--mysql-test/t/func_group_innodb.test85
-rw-r--r--mysql-test/t/func_sapdb.test2
-rw-r--r--mysql-test/t/func_str.test37
-rw-r--r--mysql-test/t/func_time.test13
-rw-r--r--mysql-test/t/func_timestamp.test6
-rw-r--r--mysql-test/t/gis-rtree.test44
-rw-r--r--mysql-test/t/gis.test7
-rw-r--r--mysql-test/t/group_by.test23
-rw-r--r--mysql-test/t/group_min_max.test73
-rw-r--r--mysql-test/t/group_min_max_innodb.test95
-rw-r--r--mysql-test/t/information_schema.test80
-rw-r--r--mysql-test/t/init_connect.test203
-rw-r--r--mysql-test/t/init_file.test14
-rw-r--r--mysql-test/t/innodb.test2
-rw-r--r--mysql-test/t/innodb_mysql.test40
-rw-r--r--mysql-test/t/insert.test7
-rw-r--r--mysql-test/t/join_outer.test18
-rw-r--r--mysql-test/t/join_outer_innodb.test26
-rw-r--r--mysql-test/t/lock.test15
-rw-r--r--mysql-test/t/lock_multi.test77
-rw-r--r--mysql-test/t/log_state.test3
-rw-r--r--mysql-test/t/lowercase_fs_off.test27
-rw-r--r--mysql-test/t/merge.test10
-rw-r--r--mysql-test/t/myisam.test42
-rw-r--r--mysql-test/t/mysql.test8
-rw-r--r--mysql-test/t/mysql_client.test29
-rw-r--r--mysql-test/t/mysqldump.test78
-rw-r--r--mysql-test/t/mysqltest.test8
-rw-r--r--mysql-test/t/ndb_alter_table.test1
-rw-r--r--mysql-test/t/ndb_autodiscover3.test9
-rw-r--r--mysql-test/t/ndb_binlog_discover.test16
-rw-r--r--mysql-test/t/ndb_blob_partition.test4
-rw-r--r--mysql-test/t/ndb_cache_multi.test7
-rwxr-xr-xmysql-test/t/ndb_dd_advance.test630
-rwxr-xr-xmysql-test/t/ndb_dd_advance2.test724
-rw-r--r--mysql-test/t/ndb_dd_backuprestore.test6
-rw-r--r--mysql-test/t/ndb_multi.test1
-rw-r--r--mysql-test/t/ndb_partition_error.test3
-rw-r--r--mysql-test/t/ndb_partition_key.test5
-rw-r--r--mysql-test/t/ndb_partition_list.test4
-rw-r--r--mysql-test/t/ndb_partition_range.test4
-rw-r--r--mysql-test/t/ndb_replace.test37
-rw-r--r--mysql-test/t/ndb_restore.test46
-rw-r--r--mysql-test/t/ndb_trigger.test92
-rw-r--r--mysql-test/t/odbc.test10
-rw-r--r--mysql-test/t/partition.test91
-rw-r--r--mysql-test/t/partition_hash.test30
-rw-r--r--mysql-test/t/partition_innodb.test82
-rw-r--r--mysql-test/t/partition_list.test44
-rw-r--r--mysql-test/t/partition_mgm.test16
-rw-r--r--mysql-test/t/partition_order.test18
-rw-r--r--mysql-test/t/partition_pruning.test52
-rw-r--r--mysql-test/t/partition_range.test131
-rw-r--r--mysql-test/t/ps.test142
-rw-r--r--mysql-test/t/ps_1general.test4
-rw-r--r--mysql-test/t/range.test25
-rw-r--r--mysql-test/t/rpl_drop_db.test4
-rw-r--r--mysql-test/t/rpl_insert.test41
-rw-r--r--mysql-test/t/rpl_ndb_bank.test1
-rw-r--r--mysql-test/t/rpl_ndb_dd_advance.test4
-rw-r--r--mysql-test/t/rpl_ndb_sync.test2
-rw-r--r--mysql-test/t/rpl_row_create_table.test7
-rw-r--r--mysql-test/t/rpl_stm_no_op.test2
-rw-r--r--mysql-test/t/rpl_switch_stm_row_mixed.test390
-rw-r--r--mysql-test/t/rpl_temporary.test11
-rw-r--r--mysql-test/t/rpl_variables.test3
-rw-r--r--mysql-test/t/select.test57
-rw-r--r--mysql-test/t/show_check.test76
-rw-r--r--mysql-test/t/sp-error.test4
-rw-r--r--mysql-test/t/sp-security.test69
-rw-r--r--mysql-test/t/sp-vars.test36
-rw-r--r--mysql-test/t/sp.test81
-rw-r--r--mysql-test/t/sp_notembedded.test24
-rw-r--r--mysql-test/t/sp_trans.test39
-rw-r--r--mysql-test/t/strict.test39
-rw-r--r--mysql-test/t/subselect.test126
-rw-r--r--mysql-test/t/subselect2.test18
-rw-r--r--mysql-test/t/trigger.test108
-rw-r--r--mysql-test/t/type_newdecimal.test11
-rw-r--r--mysql-test/t/type_timestamp.test6
-rw-r--r--mysql-test/t/udf.test19
-rw-r--r--mysql-test/t/union.test12
-rw-r--r--mysql-test/t/variables.test16
-rw-r--r--mysql-test/t/view.test92
-rw-r--r--mysql-test/t/view_grant.test62
-rw-r--r--mysql-test/t/wait_timeout.test18
-rw-r--r--mysys/my_append.c1
-rw-r--r--mysys/my_clock.c1
-rw-r--r--mysys/my_copy.c1
-rw-r--r--mysys/my_create.c1
-rw-r--r--mysys/my_dup.c1
-rw-r--r--mysys/my_handler.c1
-rw-r--r--mysys/my_lib.c3
-rw-r--r--mysys/my_malloc.c2
-rw-r--r--mysys/my_open.c1
-rw-r--r--mysys/my_redel.c1
-rw-r--r--mysys/my_rename.c1
-rw-r--r--mysys/safemalloc.c2
-rw-r--r--mysys/test_dir.c1
-rw-r--r--mysys/thr_lock.c2
-rwxr-xr-xnetware/BUILD/mwasmnlm5
-rwxr-xr-xnetware/BUILD/mwccnlm5
-rwxr-xr-xnetware/BUILD/mwldnlm5
-rw-r--r--scripts/Makefile.am3
-rw-r--r--scripts/make_binary_distribution.sh20
-rwxr-xr-xscripts/make_win_bin_dist116
-rw-r--r--scripts/mysqld_safe.sh11
-rw-r--r--server-tools/instance-manager/mysqlmanager.vcproj4
-rw-r--r--server-tools/instance-manager/parse.h2
-rw-r--r--sql-common/client.c125
-rw-r--r--sql/CMakeLists.txt2
-rw-r--r--sql/Makefile.am2
-rw-r--r--sql/event_timed.cc23
-rw-r--r--sql/events.cc12
-rw-r--r--sql/field.cc51
-rw-r--r--sql/field.h25
-rw-r--r--sql/field_conv.cc11
-rw-r--r--sql/ha_federated.cc456
-rw-r--r--sql/ha_federated.h14
-rw-r--r--sql/ha_myisam.cc3
-rw-r--r--sql/ha_myisammrg.cc2
-rw-r--r--sql/ha_ndbcluster.cc112
-rw-r--r--sql/ha_ndbcluster.h7
-rw-r--r--sql/ha_ndbcluster_binlog.cc43
-rw-r--r--sql/ha_partition.cc97
-rw-r--r--sql/ha_partition.h21
-rw-r--r--sql/handler.cc359
-rw-r--r--sql/handler.h89
-rw-r--r--sql/item.cc87
-rw-r--r--sql/item.h48
-rw-r--r--sql/item_cmpfunc.cc44
-rw-r--r--sql/item_cmpfunc.h2
-rw-r--r--sql/item_create.cc10
-rw-r--r--sql/item_create.h1
-rw-r--r--sql/item_func.cc38
-rw-r--r--sql/item_func.h22
-rw-r--r--sql/item_geofunc.h2
-rw-r--r--sql/item_strfunc.cc179
-rw-r--r--sql/item_strfunc.h42
-rw-r--r--sql/item_subselect.cc9
-rw-r--r--sql/item_subselect.h9
-rw-r--r--sql/item_sum.cc9
-rw-r--r--sql/item_timefunc.cc75
-rw-r--r--sql/item_xmlfunc.cc2
-rw-r--r--sql/lock.cc46
-rw-r--r--sql/log.cc71
-rw-r--r--sql/log_event.cc107
-rw-r--r--sql/log_event.h21
-rw-r--r--sql/mysql_priv.h41
-rw-r--r--sql/mysqld.cc9
-rw-r--r--sql/opt_range.cc190
-rw-r--r--sql/opt_range.h2
-rw-r--r--sql/rpl_filter.cc2
-rw-r--r--sql/rpl_filter.h2
-rw-r--r--sql/set_var.cc65
-rw-r--r--sql/set_var.h24
-rw-r--r--sql/share/errmsg.txt4
-rw-r--r--sql/slave.cc1075
-rw-r--r--sql/sp.cc147
-rw-r--r--sql/sp.h17
-rw-r--r--sql/sp_head.cc82
-rw-r--r--sql/sp_head.h32
-rw-r--r--sql/sql_acl.cc145
-rw-r--r--sql/sql_base.cc324
-rw-r--r--sql/sql_cache.cc2
-rw-r--r--sql/sql_class.cc72
-rw-r--r--sql/sql_class.h256
-rw-r--r--sql/sql_db.cc162
-rw-r--r--sql/sql_delete.cc6
-rw-r--r--sql/sql_handler.cc11
-rw-r--r--sql/sql_insert.cc500
-rw-r--r--sql/sql_lex.cc11
-rw-r--r--sql/sql_lex.h37
-rw-r--r--sql/sql_load.cc33
-rw-r--r--sql/sql_locale.cc1607
-rw-r--r--sql/sql_parse.cc541
-rw-r--r--sql/sql_partition.cc11
-rw-r--r--sql/sql_prepare.cc3
-rw-r--r--sql/sql_select.cc166
-rw-r--r--sql/sql_show.cc70
-rw-r--r--sql/sql_table.cc85
-rw-r--r--sql/sql_trigger.cc74
-rw-r--r--sql/sql_trigger.h12
-rw-r--r--sql/sql_udf.cc18
-rw-r--r--sql/sql_udf.h33
-rw-r--r--sql/sql_union.cc2
-rw-r--r--sql/sql_update.cc39
-rw-r--r--sql/sql_view.cc18
-rw-r--r--sql/sql_yacc.yy127
-rw-r--r--sql/structs.h96
-rw-r--r--sql/table.cc58
-rw-r--r--sql/table.h10
-rw-r--r--sql/time.cc5
-rw-r--r--sql/tztime.cc6
-rw-r--r--sql/unireg.cc51
-rw-r--r--storage/blackhole/ha_blackhole.cc2
-rw-r--r--storage/heap/hp_test1.c3
-rw-r--r--storage/myisam/mi_check.c5
-rw-r--r--storage/myisam/mi_create.c54
-rw-r--r--storage/myisam/mi_delete_table.c24
-rw-r--r--storage/myisam/mi_dynrec.c3
-rw-r--r--storage/myisam/mi_rkey.c24
-rw-r--r--storage/myisam/rt_index.c8
-rw-r--r--storage/myisam/rt_mbr.c6
-rw-r--r--storage/ndb/include/kernel/AttributeHeader.hpp3
-rw-r--r--storage/ndb/include/mgmapi/ndbd_exit_codes.h1
-rw-r--r--storage/ndb/include/ndbapi/Ndb.hpp1
-rw-r--r--storage/ndb/include/ndbapi/NdbDictionary.hpp3
-rw-r--r--storage/ndb/src/common/debugger/signaldata/SignalNames.cpp2
-rw-r--r--storage/ndb/src/kernel/blocks/ERROR_codes.txt7
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp5
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/DictLock.txt12
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp3
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp40
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp67
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp13
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp13
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp19
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp7
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp21
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp169
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp14
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp1
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp41
-rw-r--r--storage/ndb/src/kernel/blocks/suma/Suma.cpp56
-rw-r--r--storage/ndb/src/kernel/blocks/suma/Suma.hpp3
-rw-r--r--storage/ndb/src/kernel/error/TimeModule.cpp2
-rw-r--r--storage/ndb/src/kernel/error/ndbd_exit_codes.c2
-rw-r--r--storage/ndb/src/kernel/vm/SimulatedBlock.cpp9
-rw-r--r--storage/ndb/src/kernel/vm/SimulatedBlock.hpp3
-rw-r--r--storage/ndb/src/ndbapi/ClusterMgr.cpp3
-rw-r--r--storage/ndb/src/ndbapi/DictCache.cpp13
-rw-r--r--storage/ndb/src/ndbapi/Ndb.cpp42
-rw-r--r--storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp24
-rw-r--r--storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp110
-rw-r--r--storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp2
-rw-r--r--storage/ndb/src/ndbapi/NdbScanOperation.cpp56
-rw-r--r--storage/ndb/src/ndbapi/Ndbif.cpp15
-rw-r--r--storage/ndb/src/ndbapi/TransporterFacade.cpp3
-rw-r--r--storage/ndb/src/ndbapi/ndberror.c2
-rw-r--r--storage/ndb/test/ndbapi/Makefile.am4
-rw-r--r--storage/ndb/test/ndbapi/testDict.cpp101
-rw-r--r--storage/ndb/test/ndbapi/testNodeRestart.cpp1
-rw-r--r--storage/ndb/test/ndbapi/test_event_merge.cpp12
-rw-r--r--storage/ndb/test/run-test/daily-basic-tests.txt8
-rw-r--r--storage/ndb/tools/desc.cpp3
-rw-r--r--storage/ndb/tools/restore/consumer_restore.cpp53
-rw-r--r--storage/ndb/tools/waiter.cpp4
-rw-r--r--strings/decimal.c13
-rw-r--r--strings/strtod.c3
-rw-r--r--support-files/mysql.spec.sh57
-rw-r--r--tests/Makefile.am6
-rw-r--r--tests/mysql_client_test.c303
-rw-r--r--unittest/mysys/my_atomic-t.c6
525 files changed, 20625 insertions, 5304 deletions
diff --git a/.bzrignore b/.bzrignore
index d570c36c7f5..90e966390d2 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -1,4 +1,6 @@
*-t
+*.Plo
+*.Po
*.a
*.bb
*.bbg
@@ -12,6 +14,7 @@
*.gcov
*.idb
*.la
+*.lai
*.lib
*.lo
*.map
@@ -23,6 +26,7 @@
*.res
*.sbr
*.so
+*.so.*
*.spec
*/*_pure_*warnings
*/.pure
@@ -283,6 +287,7 @@ build_tags.sh
client/#mysql.cc#
client/*.ds?
client/*.vcproj
+client/.libs -prune
client/completion_hash.cpp
client/decimal.c
client/insert_test
@@ -578,6 +583,7 @@ libmysqld/sql_insert.cc
libmysqld/sql_lex.cc
libmysqld/sql_list.cc
libmysqld/sql_load.cc
+libmysqld/sql_locale.cc
libmysqld/sql_manager.cc
libmysqld/sql_map.cc
libmysqld/sql_olap.cc
@@ -1093,6 +1099,7 @@ scripts/mysql_secure_installation
scripts/mysql_setpermission
scripts/mysql_tableinfo
scripts/mysql_upgrade
+scripts/mysql_upgrade_shell
scripts/mysql_zap
scripts/mysqlaccess
scripts/mysqlbug
@@ -1120,6 +1127,7 @@ server-tools/instance-manager/messages.cpp
server-tools/instance-manager/mysql_connection.cpp
server-tools/instance-manager/mysqlmanager
server-tools/instance-manager/mysqlmanager.cpp
+server-tools/instance-manager/net_serv.cc
server-tools/instance-manager/options.cpp
server-tools/instance-manager/parse.cpp
server-tools/instance-manager/parse_output.cpp
@@ -1753,6 +1761,7 @@ test1/*
test_xml
tests/*.ds?
tests/*.vcproj
+tests/.libs -prune
tests/client_test
tests/connect_test
tests/mysql_client_test
@@ -1760,6 +1769,7 @@ thr_insert_test/*
thr_test/*
thread_test
tmp/*
+tools/.libs -prune
tools/my_vsnprintf.c
tools/mysqlmanager
tools/mysqlmngd
@@ -1780,4 +1790,3 @@ vio/viotest-sslconnect.cpp
vio/viotest.cpp
zlib/*.ds?
zlib/*.vcproj
-server-tools/instance-manager/net_serv.cc
diff --git a/client/mysql.cc b/client/mysql.cc
index ab1f4bdc8c5..36c9ada441c 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -1093,7 +1093,7 @@ static int read_and_execute(bool interactive)
(We want to allow help, print and clear anywhere at line start
*/
if ((named_cmds || glob_buffer.is_empty())
- && !in_string && (com=find_command(line,0)))
+ && !ml_comment && !in_string && (com=find_command(line,0)))
{
if ((*com->func)(&glob_buffer,line) > 0)
break;
@@ -2995,6 +2995,7 @@ static int
com_use(String *buffer __attribute__((unused)), char *line)
{
char *tmp, buff[FN_REFLEN + 1];
+ int select_db;
bzero(buff, sizeof(buff));
strmov(buff, line);
@@ -3014,34 +3015,52 @@ com_use(String *buffer __attribute__((unused)), char *line)
if (!current_db || cmp_database(charset_info, current_db,tmp))
{
if (one_database)
+ {
skip_updates= 1;
+ select_db= 0; // don't do mysql_select_db()
+ }
else
- {
- /*
- reconnect once if connection is down or if connection was found to
- be down during query
- */
- if (!connected && reconnect())
+ select_db= 2; // do mysql_select_db() and build_completion_hash()
+ }
+ else
+ {
+ /*
+ USE to the current db specified.
+ We do need to send mysql_select_db() to make server
+ update database level privileges, which might
+ change since last USE (see bug#10979).
+ For performance purposes, we'll skip rebuilding of completion hash.
+ */
+ skip_updates= 0;
+ select_db= 1; // do only mysql_select_db(), without completion
+ }
+
+ if (select_db)
+ {
+ /*
+ reconnect once if connection is down or if connection was found to
+ be down during query
+ */
+ if (!connected && reconnect())
return opt_reconnect ? -1 : 1; // Fatal error
- if (mysql_select_db(&mysql,tmp))
- {
- if (mysql_errno(&mysql) != CR_SERVER_GONE_ERROR)
- return put_error(&mysql);
+ if (mysql_select_db(&mysql,tmp))
+ {
+ if (mysql_errno(&mysql) != CR_SERVER_GONE_ERROR)
+ return put_error(&mysql);
- if (reconnect())
+ if (reconnect())
return opt_reconnect ? -1 : 1; // Fatal error
- if (mysql_select_db(&mysql,tmp))
- return put_error(&mysql);
- }
- my_free(current_db,MYF(MY_ALLOW_ZERO_PTR));
- current_db=my_strdup(tmp,MYF(MY_WME));
+ if (mysql_select_db(&mysql,tmp))
+ return put_error(&mysql);
+ }
+ my_free(current_db,MYF(MY_ALLOW_ZERO_PTR));
+ current_db=my_strdup(tmp,MYF(MY_WME));
#ifdef HAVE_READLINE
+ if (select_db > 1)
build_completion_hash(rehash, 1);
#endif
- }
}
- else
- skip_updates= 0;
+
put_info("Database changed",INFO_INFO);
return 0;
}
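
The rewritten com_use() above boils down to a three-way decision: skip the server round-trip entirely, refresh database-level privileges only (bug#10979), or switch databases and rebuild the completion hash. A minimal standalone sketch of that decision follows; the helper name and the plain strcmp() comparison are illustrative only, where the client itself uses cmp_database() with charset-aware comparison.

#include <stdbool.h>
#include <string.h>

/* Hypothetical illustration of the select_db decision introduced above:
   0 = no mysql_select_db() at all (running with --one-database),
   1 = mysql_select_db() only, to refresh db-level privileges (bug#10979),
   2 = mysql_select_db() plus completion-hash rebuild (database changed). */
static int decide_select_db(const char *current_db, const char *new_db,
                            bool one_database)
{
    bool same_db = current_db && strcmp(current_db, new_db) == 0;

    if (same_db)
        return 1;               /* refresh privileges, skip rehash */
    if (one_database)
        return 0;               /* skip updates entirely */
    return 2;                   /* switch db and rebuild completion hash */
}
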
diff --git a/client/mysqldump.c b/client/mysqldump.c
index 6df6a30be31..eadd45b960c 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -1643,9 +1643,15 @@ static uint get_table_structure(char *table, char *db, char *table_type,
field= mysql_fetch_field_direct(result, 0);
if (strcmp(field->name, "View") == 0)
{
+ char *scv_buff = NULL;
+
if (verbose)
fprintf(stderr, "-- It's a view, create dummy table for view\n");
+ /* save "show create" statement for later */
+ if ((row= mysql_fetch_row(result)) && (scv_buff=row[1]))
+ scv_buff= my_strdup(scv_buff, MYF(0));
+
mysql_free_result(result);
/*
@@ -1663,9 +1669,22 @@ static uint get_table_structure(char *table, char *db, char *table_type,
"SHOW FIELDS FROM %s", result_table);
if (mysql_query_with_error_report(sock, 0, query_buff))
{
+ /*
+ View references invalid or privileged table/col/fun (err 1356),
+ so we cannot create a stand-in table. Be defensive and dump
+ a comment with the view's 'show create' statement. (Bug #17371)
+ */
+
+ if (mysql_errno(sock) == ER_VIEW_INVALID)
+ fprintf(sql_file, "\n-- failed on view %s: %s\n\n", result_table, scv_buff ? scv_buff : "");
+
+ my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR));
+
safe_exit(EX_MYSQLERR);
- DBUG_RETURN(0);
+ DBUG_RETURN(0);
}
+ else
+ my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR));
if ((result= mysql_store_result(sock)))
{
@@ -1706,6 +1725,9 @@ static uint get_table_structure(char *table, char *db, char *table_type,
}
mysql_free_result(result);
+ if (path)
+ my_fclose(sql_file, MYF(MY_WME));
+
seen_views= 1;
DBUG_RETURN(0);
}
@@ -2363,15 +2385,16 @@ static void dump_table(char *table, char *db)
goto err;
}
- if (opt_disable_keys)
+ if (opt_lock)
{
- fprintf(md_result_file, "\n/*!40000 ALTER TABLE %s DISABLE KEYS */;\n",
- opt_quoted_table);
+ fprintf(md_result_file,"LOCK TABLES %s WRITE;\n", opt_quoted_table);
check_io(md_result_file);
}
- if (opt_lock)
+ /* Moved disable keys to after lock per bug 15977 */
+ if (opt_disable_keys)
{
- fprintf(md_result_file,"LOCK TABLES %s WRITE;\n", opt_quoted_table);
+ fprintf(md_result_file, "/*!40000 ALTER TABLE %s DISABLE KEYS */;\n",
+ opt_quoted_table);
check_io(md_result_file);
}
@@ -2635,17 +2658,19 @@ static void dump_table(char *table, char *db)
error= EX_CONSCHECK;
goto err;
}
- if (opt_lock)
- {
- fputs("UNLOCK TABLES;\n", md_result_file);
- check_io(md_result_file);
- }
+
+ /* Moved enable keys to before unlock per bug 15977 */
if (opt_disable_keys)
{
fprintf(md_result_file,"/*!40000 ALTER TABLE %s ENABLE KEYS */;\n",
opt_quoted_table);
check_io(md_result_file);
}
+ if (opt_lock)
+ {
+ fputs("UNLOCK TABLES;\n", md_result_file);
+ check_io(md_result_file);
+ }
if (opt_autocommit)
{
fprintf(md_result_file, "commit;\n");
@@ -3037,6 +3062,12 @@ static my_bool dump_all_views_in_db(char *database)
uint numrows;
char table_buff[NAME_LEN*2+3];
+ if (mysql_select_db(sock, database))
+ {
+ DB_error(sock, "when selecting the database");
+ return 1;
+ }
+
if (opt_xml)
print_xml_tag1(md_result_file, "", "database name=", database, "\n");
if (lock_tables)
@@ -3698,12 +3729,13 @@ static my_bool get_view_structure(char *table, char* db)
mysql_free_result(table_res);
/* Get the result from "select ... information_schema" */
- if (!(table_res= mysql_store_result(sock)))
+ if (!(table_res= mysql_store_result(sock)) ||
+ !(row= mysql_fetch_row(table_res)))
{
safe_exit(EX_MYSQLERR);
DBUG_RETURN(1);
}
- row= mysql_fetch_row(table_res);
+
lengths= mysql_fetch_lengths(table_res);
/*
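
The reordered dump_table() output above moves the key-disabling hint inside the LOCK TABLES window, and re-enables keys before the table is unlocked (bug 15977). A minimal sketch of the resulting statement order, assuming a hypothetical emit_table_dump() helper and ignoring identifier quoting and the option checks that surround each fprintf() in mysqldump itself:

#include <stdio.h>

/* Sketch (not mysqldump itself) of the statement order established by the
   patch for a locked dump with --disable-keys. */
static void emit_table_dump(FILE *out, const char *table)
{
    fprintf(out, "LOCK TABLES %s WRITE;\n", table);
    fprintf(out, "/*!40000 ALTER TABLE %s DISABLE KEYS */;\n", table);
    /* ... INSERT statements for the table's rows would be written here ... */
    fprintf(out, "/*!40000 ALTER TABLE %s ENABLE KEYS */;\n", table);
    fprintf(out, "UNLOCK TABLES;\n");
}
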
diff --git a/client/mysqlimport.c b/client/mysqlimport.c
index ccd6932e25b..18a31117c08 100644
--- a/client/mysqlimport.c
+++ b/client/mysqlimport.c
@@ -35,9 +35,10 @@
/* Global Thread counter */
-int counter= 0;
+int counter;
#ifdef HAVE_LIBPTHREAD
pthread_mutex_t counter_mutex;
+pthread_cond_t count_threshhold;
#endif
static void db_error_with_table(MYSQL *mysql, char *table);
@@ -556,6 +557,7 @@ error:
pthread_mutex_lock(&counter_mutex);
counter--;
+ pthread_cond_signal(&count_threshhold);
pthread_mutex_unlock(&counter_mutex);
my_thread_end();
@@ -584,28 +586,26 @@ int main(int argc, char **argv)
{
pthread_t mainthread; /* Thread descriptor */
pthread_attr_t attr; /* Thread attributes */
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr,
+ PTHREAD_CREATE_DETACHED);
+
VOID(pthread_mutex_init(&counter_mutex, NULL));
+ VOID(pthread_cond_init(&count_threshhold, NULL));
- for (; *argv != NULL; argv++) /* Loop through tables */
+ for (counter= 0; *argv != NULL; argv++) /* Loop through tables */
{
- /*
- If we hit thread count limit we loop until some threads exit.
- We sleep for a second, so that we don't chew up a lot of
- CPU in the loop.
- */
-sanity_label:
- if (counter == opt_use_threads)
+ pthread_mutex_lock(&counter_mutex);
+ while (counter == opt_use_threads)
{
- sleep(1);
- goto sanity_label;
+ struct timespec abstime;
+
+ set_timespec(abstime, 3);
+ pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime);
}
- pthread_mutex_lock(&counter_mutex);
+ /* Before exiting the lock we set ourselves up for the next thread */
counter++;
pthread_mutex_unlock(&counter_mutex);
- pthread_attr_init(&attr);
- pthread_attr_setdetachstate(&attr,
- PTHREAD_CREATE_DETACHED);
-
/* now create the thread */
if (pthread_create(&mainthread, &attr, worker_thread,
(void *)*argv) != 0)
@@ -621,13 +621,18 @@ sanity_label:
/*
We loop until we know that all children have cleaned up.
*/
-loop_label:
- if (counter)
+ pthread_mutex_lock(&counter_mutex);
+ while (counter)
{
- sleep(1);
- goto loop_label;
+ struct timespec abstime;
+
+ set_timespec(abstime, 3);
+ pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime);
}
+ pthread_mutex_unlock(&counter_mutex);
VOID(pthread_mutex_destroy(&counter_mutex));
+ VOID(pthread_cond_destroy(&count_threshhold));
+ pthread_attr_destroy(&attr);
}
else
#endif
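
The mysqlimport change above replaces the sleep(1) polling loops with a mutex plus condition variable, so the main thread wakes as soon as a worker finishes (or after a 3-second timeout). A self-contained sketch of that throttling pattern, reusing the counter/count_threshhold names from the patch but with hypothetical helper functions and plain clock_gettime() standing in for MySQL's set_timespec():

#include <pthread.h>
#include <time.h>

static pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  count_threshhold = PTHREAD_COND_INITIALIZER;
static int counter;

/* Block until the number of running workers drops below max_threads,
   then claim a slot. */
static void wait_for_free_slot(int max_threads)
{
    struct timespec abstime;

    pthread_mutex_lock(&counter_mutex);
    while (counter == max_threads)
    {
        clock_gettime(CLOCK_REALTIME, &abstime);
        abstime.tv_sec += 3;                    /* re-check every 3 seconds */
        pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime);
    }
    counter++;
    pthread_mutex_unlock(&counter_mutex);
}

/* Called by a worker when it exits, mirroring the signal in the patch. */
static void release_slot(void)
{
    pthread_mutex_lock(&counter_mutex);
    counter--;
    pthread_cond_signal(&count_threshhold);
    pthread_mutex_unlock(&counter_mutex);
}
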
diff --git a/client/mysqlslap.c b/client/mysqlslap.c
index 11d3ae5a2df..9c8585915a9 100644
--- a/client/mysqlslap.c
+++ b/client/mysqlslap.c
@@ -130,6 +130,8 @@ static my_bool opt_compress= FALSE, tty_password= FALSE,
opt_silent= FALSE,
auto_generate_sql= FALSE;
+static unsigned long connect_flags= CLIENT_MULTI_RESULTS;
+
static int verbose, num_int_cols, num_char_cols, delimiter_length;
static int iterations;
static char *default_charset= (char*) MYSQL_DEFAULT_CHARSET_NAME;
@@ -243,7 +245,6 @@ static int gettimeofday(struct timeval *tp, void *tzp)
int main(int argc, char **argv)
{
MYSQL mysql;
- int client_flag= 0;
int x;
unsigned long long client_limit;
statement *eptr;
@@ -293,12 +294,11 @@ int main(int argc, char **argv)
#endif
mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset);
- client_flag|= CLIENT_MULTI_RESULTS;
if (!opt_only_print)
{
if (!(mysql_real_connect(&mysql, host, user, opt_password,
NULL, opt_mysql_port,
- opt_mysql_unix_port, client_flag)))
+ opt_mysql_unix_port, connect_flags)))
{
fprintf(stderr,"%s: Error when connecting to server: %s\n",
my_progname,mysql_error(&mysql));
@@ -1113,6 +1113,7 @@ WAIT:
DBUG_RETURN(0);
}
+
int
run_task(thread_context *con)
{
@@ -1137,13 +1138,27 @@ run_task(thread_context *con)
my_lock(lock_file, F_RDLCK, 0, F_TO_EOF, MYF(0));
if (!opt_only_print)
{
- if (!(mysql_real_connect(mysql, host, user, opt_password,
+ /* Connect to server */
+ static ulong connection_retry_sleep= 100000; /* Microseconds */
+ int i, connect_error= 1;
+ for (i= 0; i < 10; i++)
+ {
+ if (mysql_real_connect(mysql, host, user, opt_password,
create_schema_string,
opt_mysql_port,
opt_mysql_unix_port,
- 0)))
+ connect_flags))
+ {
+ /* Connect suceeded */
+ connect_error= 0;
+ break;
+ }
+ my_sleep(connection_retry_sleep);
+ }
+ if (connect_error)
{
- fprintf(stderr,"%s: %s\n",my_progname,mysql_error(mysql));
+ fprintf(stderr,"%s: Error when connecting to server: %d %s\n",
+ my_progname, mysql_errno(mysql), mysql_error(mysql));
goto end;
}
}
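
The run_task() change above retries the connection up to ten times with a 100 ms pause instead of failing on the first error. A standalone sketch of the same retry loop, assuming an initialized MYSQL handle and substituting usleep() for MySQL's internal my_sleep():

#include <stdio.h>
#include <unistd.h>
#include <mysql.h>

/* Attempt to connect up to 10 times, sleeping 100 ms between attempts. */
static MYSQL *connect_with_retry(MYSQL *mysql, const char *host,
                                 const char *user, const char *passwd,
                                 const char *db, unsigned int port,
                                 const char *socket_name, unsigned long flags)
{
    int i;

    for (i = 0; i < 10; i++)
    {
        if (mysql_real_connect(mysql, host, user, passwd, db,
                               port, socket_name, flags))
            return mysql;                       /* connect succeeded */
        usleep(100000);                         /* 100000 us = 100 ms */
    }
    fprintf(stderr, "connect failed: %d %s\n",
            mysql_errno(mysql), mysql_error(mysql));
    return NULL;
}
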
diff --git a/client/mysqltest.c b/client/mysqltest.c
index b21e3883631..3b55ebb2f8b 100644
--- a/client/mysqltest.c
+++ b/client/mysqltest.c
@@ -100,7 +100,8 @@ enum {OPT_MANAGER_USER=256,OPT_MANAGER_HOST,OPT_MANAGER_PASSWD,
OPT_MANAGER_PORT,OPT_MANAGER_WAIT_TIMEOUT, OPT_SKIP_SAFEMALLOC,
OPT_SSL_SSL, OPT_SSL_KEY, OPT_SSL_CERT, OPT_SSL_CA, OPT_SSL_CAPATH,
OPT_SSL_CIPHER,OPT_PS_PROTOCOL,OPT_SP_PROTOCOL,OPT_CURSOR_PROTOCOL,
- OPT_VIEW_PROTOCOL, OPT_SSL_VERIFY_SERVER_CERT, OPT_MAX_CONNECT_RETRIES};
+ OPT_VIEW_PROTOCOL, OPT_SSL_VERIFY_SERVER_CERT, OPT_MAX_CONNECT_RETRIES,
+ OPT_MARK_PROGRESS};
/* ************************************************************************ */
/*
@@ -153,6 +154,7 @@ static int port = 0;
static int opt_max_connect_retries;
static my_bool opt_big_test= 0, opt_compress= 0, silent= 0, verbose = 0;
static my_bool tty_password= 0;
+static my_bool opt_mark_progress= 0;
static my_bool ps_protocol= 0, ps_protocol_enabled= 0;
static my_bool sp_protocol= 0, sp_protocol_enabled= 0;
static my_bool view_protocol= 0, view_protocol_enabled= 0;
@@ -234,7 +236,7 @@ static my_bool display_result_vertically= FALSE, display_metadata= FALSE;
/* See the timer_output() definition for details */
static char *timer_file = NULL;
-static ulonglong timer_start;
+static ulonglong timer_start, progress_start= 0;
static int got_end_timer= FALSE;
static void timer_output(void);
static ulonglong timer_now(void);
@@ -445,7 +447,7 @@ const char *command_names[]=
TYPELIB command_typelib= {array_elements(command_names),"",
command_names, 0};
-DYNAMIC_STRING ds_res;
+DYNAMIC_STRING ds_res, ds_progress;
static void die(const char *fmt, ...);
static void init_var_hash();
static VAR* var_from_env(const char *, const char *);
@@ -631,6 +633,7 @@ static void free_used_memory()
my_free(embedded_server_args[--embedded_server_arg_count],MYF(0));
delete_dynamic(&q_lines);
dynstr_free(&ds_res);
+ dynstr_free(&ds_progress);
free_replace();
free_replace_column();
my_free(pass,MYF(MY_ALLOW_ZERO_PTR));
@@ -1204,7 +1207,7 @@ static void do_exec(struct st_query *query)
}
}
error= pclose(res_file);
- if (error != 0)
+ if (error > 0)
{
uint status= WEXITSTATUS(error), i;
my_bool ok= 0;
@@ -2518,7 +2521,7 @@ int safe_connect(MYSQL* mysql, const char *host, const char *user,
{
int con_error= 1;
my_bool reconnect= 1;
- static int connection_retry_sleep= 2; /* Seconds */
+ static ulong connection_retry_sleep= 100000; /* Microseconds */
int i;
for (i= 0; i < opt_max_connect_retries; i++)
{
@@ -2528,7 +2531,7 @@ int safe_connect(MYSQL* mysql, const char *host, const char *user,
con_error= 0;
break;
}
- sleep(connection_retry_sleep);
+ my_sleep(connection_retry_sleep);
}
/*
TODO: change this to 0 in future versions, but the 'kill' test relies on
@@ -3298,10 +3301,14 @@ static struct my_option my_long_options[] =
{"manager-wait-timeout", OPT_MANAGER_WAIT_TIMEOUT,
"Undocumented: Used for debugging.", (gptr*) &manager_wait_timeout,
(gptr*) &manager_wait_timeout, 0, GET_INT, REQUIRED_ARG, 3, 0, 0, 0, 0, 0},
+ {"mark-progress", OPT_MARK_PROGRESS,
+ "Write linenumber and elapsed time to <testname>.progress ",
+ (gptr*) &opt_mark_progress, (gptr*) &opt_mark_progress, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"max-connect-retries", OPT_MAX_CONNECT_RETRIES,
"Max number of connection attempts when connecting to server",
(gptr*) &opt_max_connect_retries, (gptr*) &opt_max_connect_retries, 0,
- GET_INT, REQUIRED_ARG, 5, 1, 10, 0, 0, 0},
+ GET_INT, REQUIRED_ARG, 500, 1, 10000, 0, 0, 0},
{"password", 'p', "Password to use when connecting to server.",
0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"port", 'P', "Port number to use for connection.", (gptr*) &port,
@@ -3537,6 +3544,13 @@ void dump_result_to_log_file(const char *record_file, char *buf, int size)
str_to_file(fn_format(log_file, record_file,"",".log",2), buf, size);
}
+void dump_progress(const char *record_file)
+{
+ char log_file[FN_REFLEN];
+ str_to_file(fn_format(log_file, record_file,"",".progress",2),
+ ds_progress.str, ds_progress.length);
+}
+
static void check_regerr(my_regex_t* r, int err)
{
char err_buf[1024];
@@ -5116,36 +5130,47 @@ static void init_var_hash(MYSQL *mysql)
DBUG_VOID_RETURN;
}
-static void mark_progress(int line __attribute__((unused)))
+
+/*
+ Record how many milliseconds it took to execute the test file
+ up until the current line and save it in the dynamic string ds_progress.
+
+ The ds_progress will be dumped to <test_name>.progress when
+ test run completes
+
+*/
+static void mark_progress(struct st_query* q, int line)
{
-#ifdef NOT_YET
- static FILE* fp = NULL;
- static double first;
+ char buf[32], *end;
+ ulonglong timer= timer_now();
+ if (!progress_start)
+ progress_start= timer;
+ timer-= progress_start;
- struct timeval tv;
- double now;
+ /* Milliseconds since start */
+ end= longlong2str(timer, buf, 10);
+ dynstr_append_mem(&ds_progress, buf, (int)(end-buf));
+ dynstr_append_mem(&ds_progress, "\t", 1);
- if (!fp)
- {
+ /* Parser line number */
+ end= int10_to_str(line, buf, 10);
+ dynstr_append_mem(&ds_progress, buf, (int)(end-buf));
+ dynstr_append_mem(&ds_progress, "\t", 1);
- fp = fopen("/tmp/mysqltest_progress.log", "wt");
+ /* Filename */
+ dynstr_append(&ds_progress, cur_file->file_name);
+ dynstr_append_mem(&ds_progress, ":", 1);
- if (!fp)
- {
- abort();
- }
+ /* Line in file */
+ end= int10_to_str(cur_file->lineno, buf, 10);
+ dynstr_append_mem(&ds_progress, buf, (int)(end-buf));
- gettimeofday(&tv, NULL);
- first = tv.tv_sec * 1e6 + tv.tv_usec;
- }
- gettimeofday(&tv, NULL);
- now = tv.tv_sec * 1e6 + tv.tv_usec;
+ dynstr_append_mem(&ds_progress, "\n", 1);
- fprintf(fp, "%d %f\n", parser.current_line, (now - first) / 1e6);
-#endif
}
+
int main(int argc, char **argv)
{
struct st_query *q;
@@ -5185,6 +5210,7 @@ int main(int argc, char **argv)
memset(&master_pos, 0, sizeof(master_pos));
init_dynamic_string(&ds_res, "", 0, 65536);
+ init_dynamic_string(&ds_progress, "", 0, 2048);
parse_args(argc, argv);
DBUG_PRINT("info",("result_file: '%s'", result_file ? result_file : ""));
@@ -5509,7 +5535,8 @@ int main(int argc, char **argv)
}
parser.current_line += current_line_inc;
- mark_progress(parser.current_line);
+ if ( opt_mark_progress )
+ mark_progress(q, parser.current_line);
}
start_lineno= 0;
@@ -5560,6 +5587,9 @@ int main(int argc, char **argv)
die("No queries executed but result file found!");
}
+ if ( opt_mark_progress )
+ dump_progress(result_file);
+ dynstr_free(&ds_progress);
dynstr_free(&ds_res);
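
With --mark-progress, the mark_progress() function above appends one tab-separated record per executed statement: elapsed milliseconds since the first mark, the parser line number, and the current file position, later dumped to <testname>.progress. A small sketch of that record format, with a hypothetical append_progress() helper standing in for the DYNAMIC_STRING calls:

#include <stdio.h>
#include <string.h>

/* Append one progress record to buf; the caller guarantees buf is a
   NUL-terminated string with buflen bytes of total capacity. */
static void append_progress(char *buf, size_t buflen,
                            unsigned long long elapsed_ms, int parser_line,
                            const char *file_name, int file_line)
{
    char rec[256];

    snprintf(rec, sizeof(rec), "%llu\t%d\t%s:%d\n",
             elapsed_ms, parser_line, file_name, file_line);
    strncat(buf, rec, buflen - strlen(buf) - 1);
}
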
diff --git a/configure.in b/configure.in
index 1306a3cc74d..14a20322e69 100644
--- a/configure.in
+++ b/configure.in
@@ -15,7 +15,7 @@ DOT_FRM_VERSION=6
# See the libtool docs for information on how to do shared lib versions.
SHARED_LIB_MAJOR_VERSION=15
SHARED_LIB_VERSION=$SHARED_LIB_MAJOR_VERSION:0:0
-
+
# Set all version vars based on $VERSION. How do we do this more elegant ?
# Remember that regexps needs to quote [ and ] since this is run through m4
MYSQL_NO_DASH_VERSION=`echo $VERSION | sed -e "s|[[a-z]]*-.*$||"`
@@ -358,18 +358,18 @@ fi
AC_SUBST(LD_VERSION_SCRIPT)
# Avoid bug in fcntl on some versions of linux
-AC_MSG_CHECKING("if we should use 'skip-locking' as default for $target_os")
+AC_MSG_CHECKING([if we should use 'skip-external-locking' as default for $target_os])
# Any variation of Linux
if expr "$target_os" : "[[Ll]]inux.*" > /dev/null
then
- MYSQLD_DEFAULT_SWITCHES="--skip-locking"
+ MYSQLD_DEFAULT_SWITCHES="--skip-external-locking"
TARGET_LINUX="true"
- AC_MSG_RESULT("yes")
+ AC_MSG_RESULT([yes])
AC_DEFINE([TARGET_OS_LINUX], [1], [Whether we build for Linux])
else
MYSQLD_DEFAULT_SWITCHES=""
TARGET_LINUX="false"
- AC_MSG_RESULT("no")
+ AC_MSG_RESULT([no])
fi
AC_SUBST(MYSQLD_DEFAULT_SWITCHES)
AC_SUBST(TARGET_LINUX)
@@ -618,7 +618,7 @@ AC_ARG_ENABLE(assembler,
AC_MSG_CHECKING(if we should use assembler functions)
# For now we only support assembler on i386 and sparc systems
-AM_CONDITIONAL(ASSEMBLER_x86, test "$ENABLE_ASSEMBLER" = "yes" -a "$BASE_MACHINE_TYPE" = "i386")
+AM_CONDITIONAL(ASSEMBLER_x86, test "$ENABLE_ASSEMBLER" = "yes" -a "$BASE_MACHINE_TYPE" = "i386" && $AS strings/strings-x86.s -o checkassembler >/dev/null 2>&1 && test -f checkassembler && (rm -f checkassembler; exit 0;))
AM_CONDITIONAL(ASSEMBLER_sparc32, test "$ENABLE_ASSEMBLER" = "yes" -a "$BASE_MACHINE_TYPE" = "sparc")
AM_CONDITIONAL(ASSEMBLER_sparc64, test "$ENABLE_ASSEMBLER" = "yes" -a "$BASE_MACHINE_TYPE" = "sparcv9")
AM_CONDITIONAL(ASSEMBLER, test "$ASSEMBLER_x86_TRUE" = "" -o "$ASSEMBLER_sparc32_TRUE" = "")
diff --git a/extra/yassl/FLOSS-EXCEPTIONS b/extra/yassl/FLOSS-EXCEPTIONS
new file mode 100644
index 00000000000..344083b0114
--- /dev/null
+++ b/extra/yassl/FLOSS-EXCEPTIONS
@@ -0,0 +1,120 @@
+yaSSL FLOSS License Exception
+****************************************
+
+Version 0.1, 26 June 2006
+
+The Sawtooth Consulting Ltd. Exception for Free/Libre and Open Source
+Software-only Applications Using yaSSL Libraries (the "FLOSS Exception").
+
+*Exception Intent*
+
+We want specified Free/Libre and Open Source Software ("FLOSS")
+applications to be able to use specified GPL-licensed yaSSL
+libraries (the "Program") despite the fact that not all FLOSS
+licenses are compatible with version 2 of the GNU General Public
+License (the "GPL").
+
+*Legal Terms and Conditions*
+
+As a special exception to the terms and conditions of version 2.0 of
+the GPL:
+
+ 1. You are free to distribute a Derivative Work that is formed
+ entirely from the Program and one or more works (each, a "FLOSS
+ Work") licensed under one or more of the licenses listed below
+ in section 1, as long as:
+
+ 1. You obey the GPL in all respects for the Program and the
+ Derivative Work, except for identifiable sections of the
+ Derivative Work which are not derived from the Program,
+ and which can reasonably be considered independent and
+ separate works in themselves,
+
+ 2. all identifiable sections of the Derivative Work which
+ are not derived from the Program, and which can reasonably be
+ considered independent and separate works in themselves,
+
+ * i
+
+ are distributed subject to one of the FLOSS licenses
+ listed below, and
+
+ * ii
+
+ the object code or executable form of those sections are
+ accompanied by the complete corresponding machine-readable
+ source code for those sections on the same medium and under
+ the same FLOSS license as the corresponding object code or
+ executable forms of those sections, and
+
+
+ 3. any works which are aggregated with the Program or with
+ a Derivative Work on a volume of a storage or distribution
+ medium in accordance with the GPL, can reasonably be considered
+ independent and separate works in themselves which are not
+ derivatives of either the Program, a Derivative Work or a FLOSS
+ Work.
+
+
+ If the above conditions are not met, then the Program may only be
+ copied, modified, distributed or used under the terms and
+ conditions of the GPL or another valid licensing option from
+ Sawtooth Consulting Ltd.
+
+ 2. FLOSS License List
+
+ *License name* *Version(s)/Copyright Date*
+ Academic Free License 2.0
+ Apache Software License 1.0/1.1/2.0
+ Apple Public Source License 2.0
+ Artistic license From Perl 5.8.0
+ BSD license "July 22 1999"
+ Common Development and Distribution License (CDDL) 1.0
+ Common Public License 1.0
+ GNU Library or "Lesser" General Public 2.0/2.1
+ License (LGPL)
+ Jabber Open Source License 1.0
+ MIT license -
+ Mozilla Public License (MPL) 1.0/1.1
+ Open Software License 2.0
+ PHP License 3.0
+ Python license (CNRI Python License) -
+ Python Software Foundation License 2.1.1
+ Sleepycat License "1999"
+ W3C License "2001"
+ X11 License "2001"
+ Zlib/libpng License -
+ Zope Public License 2.0
+
+ Due to the many variants of some of the above licenses, we require
+ that any version follow the 2003 version of the Free Software
+ Foundation's Free Software Definition
+ (http://www.gnu.org/philosophy/free-sw.html
+ (http://www.gnu.org/philosophy/free-sw.html)) or version 1.9 of
+ the Open Source Definition by the Open Source Initiative
+ (http://www.opensource.org/docs/definition.php
+ (http://www.opensource.org/docs/definition.php)).
+
+ 3. Definitions
+
+ 1. Terms used, but not defined, herein shall have the
+ meaning provided in the GPL.
+
+ 2. Derivative Work means a derivative work under copyright
+ law.
+
+
+ 4. Applicability This FLOSS Exception applies to all Programs that
+ contain a notice placed by Sawtooth Consulting Ltd. saying that the
+ Program may be distributed under the terms of this FLOSS Exception.
+ If you create or distribute a work which is a Derivative Work of
+ both the Program and any other work licensed under the GPL, then
+ this FLOSS Exception is not available for that work; thus, you
+ must remove the FLOSS Exception notice from that work and
+ comply with the GPL in all respects, including by retaining all
+ GPL notices. You may choose to redistribute a copy of the
+ Program exclusively under the terms of the GPL by removing the
+ FLOSS Exception notice from that copy of the Program, provided
+ that the copy has never been modified by you or any third party.
+
+
diff --git a/extra/yassl/README b/extra/yassl/README
index a5ff70aa6f6..25d4d94c306 100644
--- a/extra/yassl/README
+++ b/extra/yassl/README
@@ -1,4 +1,14 @@
-yaSSL Release notes, version 1.3.5 (06/01/06)
+yaSSL Release notes, version 1.3.7 (06/26/06)
+
+
+ This release of yaSSL contains bug fixes, portability enhancements,
+ and libcurl 7.15.4 support (any newer versions may not build).
+
+See normal build instructions below under 1.0.6.
+See libcurl build instructions below under 1.3.0.
+
+
+********************yaSSL Release notes, version 1.3.5 (06/01/06)
This release of yaSSL contains bug fixes, portability enhancements,
diff --git a/extra/yassl/include/buffer.hpp b/extra/yassl/include/buffer.hpp
index 6b0e9e65389..4816f79a9bc 100644
--- a/extra/yassl/include/buffer.hpp
+++ b/extra/yassl/include/buffer.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/include/cert_wrapper.hpp b/extra/yassl/include/cert_wrapper.hpp
index 2a214c529fd..8b5b7491772 100644
--- a/extra/yassl/include/cert_wrapper.hpp
+++ b/extra/yassl/include/cert_wrapper.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/include/crypto_wrapper.hpp b/extra/yassl/include/crypto_wrapper.hpp
index cb542c25a67..4c4e4d5da5b 100644
--- a/extra/yassl/include/crypto_wrapper.hpp
+++ b/extra/yassl/include/crypto_wrapper.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -36,6 +40,7 @@
#define yaSSL_CRYPTO_WRAPPER_HPP
#include "yassl_types.hpp"
+#include <stdio.h> // FILE
namespace yaSSL {
@@ -410,7 +415,8 @@ private:
class x509;
-x509* PemToDer(const char*, CertType);
+
+x509* PemToDer(FILE*, CertType);
} // naemspace
diff --git a/extra/yassl/include/factory.hpp b/extra/yassl/include/factory.hpp
index f457188f587..5619e90cd62 100644
--- a/extra/yassl/include/factory.hpp
+++ b/extra/yassl/include/factory.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/include/handshake.hpp b/extra/yassl/include/handshake.hpp
index 6261359cd58..ea390fee322 100644
--- a/extra/yassl/include/handshake.hpp
+++ b/extra/yassl/include/handshake.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -55,7 +59,7 @@ void sendCertificateVerify(SSL&, BufferOutput = buffered);
int sendData(SSL&, const void*, int);
int sendAlert(SSL& ssl, const Alert& alert);
-int receiveData(SSL&, Data&);
+int receiveData(SSL&, Data&, bool peek = false);
void processReply(SSL&);
void buildFinished(SSL&, Finished&, const opaque*);
diff --git a/extra/yassl/include/lock.hpp b/extra/yassl/include/lock.hpp
index f5231528821..5b585b1cf16 100644
--- a/extra/yassl/include/lock.hpp
+++ b/extra/yassl/include/lock.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/include/log.hpp b/extra/yassl/include/log.hpp
index c20116901ad..33cb38ebae0 100644
--- a/extra/yassl/include/log.hpp
+++ b/extra/yassl/include/log.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/include/openssl/ssl.h b/extra/yassl/include/openssl/ssl.h
index af801029561..47b4d075894 100644
--- a/extra/yassl/include/openssl/ssl.h
+++ b/extra/yassl/include/openssl/ssl.h
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -36,6 +40,10 @@
#include "opensslv.h" /* for version number */
#include "rsa.h"
+
+#define YASSL_VERSION "1.3.7"
+
+
#if defined(__cplusplus)
extern "C" {
#endif
@@ -414,6 +422,7 @@ int RAND_load_file(const char*, long);
/* for libcurl */
int RAND_status(void);
+int RAND_bytes(unsigned char* buf, int num);
int DES_set_key(const_DES_cblock*, DES_key_schedule*);
void DES_set_odd_parity(DES_cblock*);
@@ -421,6 +430,7 @@ void DES_ecb_encrypt(DES_cblock*, DES_cblock*, DES_key_schedule*, int);
void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX*, void* userdata);
void SSL_SESSION_free(SSL_SESSION* session);
+int SSL_peek(SSL* ssl, void* buf, int num);
X509* SSL_get_certificate(SSL* ssl);
EVP_PKEY* SSL_get_privatekey(SSL* ssl);
@@ -513,6 +523,8 @@ void MD5_Init(MD5_CTX*);
void MD5_Update(MD5_CTX*, const void*, unsigned long);
void MD5_Final(unsigned char*, MD5_CTX*);
+#define MD5_DIGEST_LENGTH 16
+
#define SSL_DEFAULT_CIPHER_LIST "" /* default all */
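
Among the additions above is an SSL_peek() prototype (paired with the peek flag on receiveData() and Buffers::PeekData() elsewhere in this change), letting callers such as libcurl inspect pending application data without consuming it. A usage sketch under that assumption; error handling is omitted and ssl is taken to be an established connection:

#include <openssl/ssl.h>

/* Peek at pending data, then read the same bytes for real. */
static int peek_then_read(SSL *ssl, char *buf, int len)
{
    int n = SSL_peek(ssl, buf, len);    /* data stays queued for reading */

    if (n > 0)
        n = SSL_read(ssl, buf, len);    /* same bytes, now consumed */
    return n;
}
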
diff --git a/extra/yassl/include/socket_wrapper.hpp b/extra/yassl/include/socket_wrapper.hpp
index 16db142b3a2..1dd61b63148 100644
--- a/extra/yassl/include/socket_wrapper.hpp
+++ b/extra/yassl/include/socket_wrapper.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/include/timer.hpp b/extra/yassl/include/timer.hpp
index 3025b7a0bd9..ff90aa884de 100644
--- a/extra/yassl/include/timer.hpp
+++ b/extra/yassl/include/timer.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/include/yassl_error.hpp b/extra/yassl/include/yassl_error.hpp
index 2f35fecb59b..3c3d5fa5231 100644
--- a/extra/yassl/include/yassl_error.hpp
+++ b/extra/yassl/include/yassl_error.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/include/yassl_imp.hpp b/extra/yassl/include/yassl_imp.hpp
index 2f240b71c03..838aace72c8 100644
--- a/extra/yassl/include/yassl_imp.hpp
+++ b/extra/yassl/include/yassl_imp.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -664,9 +668,9 @@ struct Parameters {
char cipher_name_[MAX_SUITE_NAME];
char cipher_list_[MAX_CIPHERS][MAX_SUITE_NAME];
- Parameters(ConnectionEnd, const Ciphers&, ProtocolVersion);
+ Parameters(ConnectionEnd, const Ciphers&, ProtocolVersion, bool haveDH);
- void SetSuites(ProtocolVersion pv);
+ void SetSuites(ProtocolVersion pv, bool removeDH = false);
void SetCipherNames();
private:
Parameters(const Parameters&); // hide copy
diff --git a/extra/yassl/include/yassl_int.hpp b/extra/yassl/include/yassl_int.hpp
index 633b75d479f..26900aed3af 100644
--- a/extra/yassl/include/yassl_int.hpp
+++ b/extra/yassl/include/yassl_int.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -429,9 +433,10 @@ private:
// holds input and output buffers
class Buffers {
+public:
typedef mySTL::list<input_buffer*> inputList;
typedef mySTL::list<output_buffer*> outputList;
-
+private:
inputList dataList_; // list of users app data / handshake
outputList handShakeList_; // buffered handshake msgs
public:
@@ -458,7 +463,7 @@ class Security {
bool resuming_; // trying to resume
public:
Security(ProtocolVersion, RandomPool&, ConnectionEnd, const Ciphers&,
- SSL_CTX*);
+ SSL_CTX*, bool);
const SSL_CTX* GetContext() const;
const Connection& get_connection() const;
@@ -521,6 +526,7 @@ public:
void makeTLSMasterSecret();
void addData(input_buffer* data);
void fillData(Data&);
+ void PeekData(Data&);
void addBuffer(output_buffer* b);
void flushBuffer();
void verifyState(const RecordLayerHeader&);
diff --git a/extra/yassl/include/yassl_types.hpp b/extra/yassl/include/yassl_types.hpp
index 76c807cd05f..b75a2a45302 100644
--- a/extra/yassl/include/yassl_types.hpp
+++ b/extra/yassl/include/yassl_types.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/mySTL/algorithm.hpp b/extra/yassl/mySTL/algorithm.hpp
index 3ceb0ca5fdc..efc7aa21a07 100644
--- a/extra/yassl/mySTL/algorithm.hpp
+++ b/extra/yassl/mySTL/algorithm.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/mySTL/helpers.hpp b/extra/yassl/mySTL/helpers.hpp
index df79025197a..c4449519db3 100644
--- a/extra/yassl/mySTL/helpers.hpp
+++ b/extra/yassl/mySTL/helpers.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/mySTL/list.hpp b/extra/yassl/mySTL/list.hpp
index dd8485f48a7..11a1a914868 100644
--- a/extra/yassl/mySTL/list.hpp
+++ b/extra/yassl/mySTL/list.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/mySTL/memory.hpp b/extra/yassl/mySTL/memory.hpp
index cc70fbf60d8..f480af12316 100644
--- a/extra/yassl/mySTL/memory.hpp
+++ b/extra/yassl/mySTL/memory.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/mySTL/pair.hpp b/extra/yassl/mySTL/pair.hpp
index c9bb03b5953..13916fece13 100644
--- a/extra/yassl/mySTL/pair.hpp
+++ b/extra/yassl/mySTL/pair.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/mySTL/stdexcept.hpp b/extra/yassl/mySTL/stdexcept.hpp
index 33ea43bf0e0..4fd343ae6fd 100644
--- a/extra/yassl/mySTL/stdexcept.hpp
+++ b/extra/yassl/mySTL/stdexcept.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/mySTL/vector.hpp b/extra/yassl/mySTL/vector.hpp
index 9eab91cfda8..6a412447b91 100644
--- a/extra/yassl/mySTL/vector.hpp
+++ b/extra/yassl/mySTL/vector.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/src/buffer.cpp b/extra/yassl/src/buffer.cpp
index 3bc6dced887..4d396a8d29f 100644
--- a/extra/yassl/src/buffer.cpp
+++ b/extra/yassl/src/buffer.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/src/cert_wrapper.cpp b/extra/yassl/src/cert_wrapper.cpp
index ae609b510ba..6ad0aa568ed 100644
--- a/extra/yassl/src/cert_wrapper.cpp
+++ b/extra/yassl/src/cert_wrapper.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -182,7 +186,8 @@ int CertManager::CopyCaCert(const x509* x)
signers_.push_back(NEW_YS TaoCrypt::Signer(key.GetKey(), key.size(),
cert.GetCommonName(), cert.GetHash()));
}
- return cert.GetError().What();
+    // decoding failed: just don't add this CA, not an error
+ return 0;
}
diff --git a/extra/yassl/src/crypto_wrapper.cpp b/extra/yassl/src/crypto_wrapper.cpp
index 8859fbdd70f..799106ec7c0 100644
--- a/extra/yassl/src/crypto_wrapper.cpp
+++ b/extra/yassl/src/crypto_wrapper.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -904,7 +908,7 @@ void DiffieHellman::get_parms(byte* bp, byte* bg, byte* bpub) const
// convert PEM file to DER x509 type
-x509* PemToDer(const char* fname, CertType type)
+x509* PemToDer(FILE* file, CertType type)
{
using namespace TaoCrypt;
@@ -919,10 +923,6 @@ x509* PemToDer(const char* fname, CertType type)
strncpy(footer, "-----END RSA PRIVATE KEY-----", sizeof(header));
}
- FILE* file = fopen(fname, "rb");
- if (!file)
- return 0;
-
long begin = -1;
long end = 0;
bool foundEnd = false;
@@ -943,18 +943,14 @@ x509* PemToDer(const char* fname, CertType type)
else
end = ftell(file);
- if (begin == -1 || !foundEnd) {
- fclose(file);
+ if (begin == -1 || !foundEnd)
return 0;
- }
input_buffer tmp(end - begin);
fseek(file, begin, SEEK_SET);
size_t bytes = fread(tmp.get_buffer(), end - begin, 1, file);
- if (bytes != 1) {
- fclose(file);
+ if (bytes != 1)
return 0;
- }
Source der(tmp.get_buffer(), end - begin);
Base64Decoder b64Dec(der);
@@ -963,7 +959,6 @@ x509* PemToDer(const char* fname, CertType type)
mySTL::auto_ptr<x509> x(NEW_YS x509(sz), ysDelete);
memcpy(x->use_buffer(), der.get_buffer(), sz);
- fclose(file);
return x.release();
}
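
With this change PemToDer() takes an already opened FILE* instead of a file name and leaves the handle open, so a caller can keep pulling PEM blocks out of the same stream; the read_file() change in ssl.cpp below relies on this to load a CA bundle. A standalone sketch of that calling pattern (plain C++, not yaSSL code; it only counts BEGIN/END blocks):

    #include <stdio.h>
    #include <string.h>

    /* Returns 1 if another "BEGIN ... END" PEM block was consumed, else 0. */
    static int next_pem_block(FILE* in)
    {
        char line[256];
        int inside = 0;
        while (fgets(line, sizeof(line), in)) {
            if (strstr(line, "-----BEGIN "))
                inside = 1;
            else if (inside && strstr(line, "-----END "))
                return 1;
        }
        return 0;   /* end of file (or unterminated block) */
    }

    int main(int argc, char** argv)
    {
        if (argc < 2) return 1;
        FILE* in = fopen(argv[1], "rb");
        if (!in) return 1;

        int count = 0;
        while (next_pem_block(in))   /* same shape as the read_file() CA loop */
            ++count;

        printf("%d PEM blocks\n", count);
        fclose(in);
        return 0;
    }
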
diff --git a/extra/yassl/src/handshake.cpp b/extra/yassl/src/handshake.cpp
index 66ec64f4af8..e93f5385b3d 100644
--- a/extra/yassl/src/handshake.cpp
+++ b/extra/yassl/src/handshake.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -912,7 +916,7 @@ int sendAlert(SSL& ssl, const Alert& alert)
// process input data
-int receiveData(SSL& ssl, Data& data)
+int receiveData(SSL& ssl, Data& data, bool peek)
{
if (ssl.GetError() == YasslError(SSL_ERROR_WANT_READ))
ssl.SetError(no_error);
@@ -922,9 +926,13 @@ int receiveData(SSL& ssl, Data& data)
if (!ssl.bufferedData())
processReply(ssl);
+
+ if (peek)
+ ssl.PeekData(data);
+ else
ssl.fillData(data);
- ssl.useLog().ShowData(data.get_length());
+ ssl.useLog().ShowData(data.get_length());
if (ssl.GetError()) return -1;
if (data.get_length() == 0 && ssl.getSocket().WouldBlock()) {
diff --git a/extra/yassl/src/lock.cpp b/extra/yassl/src/lock.cpp
index 4827d396e81..0f4c80b1616 100644
--- a/extra/yassl/src/lock.cpp
+++ b/extra/yassl/src/lock.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/src/log.cpp b/extra/yassl/src/log.cpp
index 8ab351ee2b1..c8030787f3d 100644
--- a/extra/yassl/src/log.cpp
+++ b/extra/yassl/src/log.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/src/socket_wrapper.cpp b/extra/yassl/src/socket_wrapper.cpp
index 06b403c999d..7790001fc2d 100644
--- a/extra/yassl/src/socket_wrapper.cpp
+++ b/extra/yassl/src/socket_wrapper.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/src/ssl.cpp b/extra/yassl/src/ssl.cpp
index 07f5e9859b2..81e585ff735 100644
--- a/extra/yassl/src/ssl.cpp
+++ b/extra/yassl/src/ssl.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -64,12 +68,15 @@ int read_file(SSL_CTX* ctx, const char* file, int format, CertType type)
return SSL_BAD_FILE;
if (type == CA) {
- x509* ptr = PemToDer(file, Cert);
- if (!ptr) {
+ // may have a bunch of CAs
+ x509* ptr;
+ while ( (ptr = PemToDer(input, Cert)) )
+ ctx->AddCA(ptr);
+
+ if (!feof(input)) {
fclose(input);
return SSL_BAD_FILE;
}
- ctx->AddCA(ptr); // takes ownership
}
else {
x509*& x = (type == Cert) ? ctx->certificate_ : ctx->privateKey_;
@@ -86,7 +93,7 @@ int read_file(SSL_CTX* ctx, const char* file, int format, CertType type)
}
}
else {
- x = PemToDer(file, type);
+ x = PemToDer(input, type);
if (!x) {
fclose(input);
return SSL_BAD_FILE;
@@ -1189,6 +1196,35 @@ void MD5_Final(unsigned char* hash, MD5_CTX* md5)
}
+int RAND_bytes(unsigned char* buf, int num)
+{
+ RandomPool ran;
+
+ if (ran.GetError()) return 0;
+
+ ran.Fill(buf, num);
+ return 1;
+}
+
+
+int SSL_peek(SSL* ssl, void* buffer, int sz)
+{
+ Data data(min(sz, MAX_RECORD_SIZE), static_cast<opaque*>(buffer));
+ return receiveData(*ssl, data, true);
+}
+
+
+int SSL_pending(SSL* ssl)
+{
+ // Just in case there's pending data that hasn't been processed yet...
+ char c;
+ SSL_peek(ssl, &c, 1);
+
+ return ssl->bufferedData();
+}
+
+
+
// functions for stunnel
void RAND_screen()
@@ -1352,12 +1388,6 @@ void MD5_Final(unsigned char* hash, MD5_CTX* md5)
}
- int SSL_pending(SSL*)
- {
- return SSL_SUCCESS; // TODO:
- }
-
-
int SSL_want_read(SSL*)
{
return 0; // TODO:
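
Besides the license note, ssl.cpp gains real implementations of RAND_bytes(), SSL_peek() and SSL_pending(), and the old SSL_pending() stub that always returned SSL_SUCCESS is removed; SSL_pending() peeks one byte first so a record that has already arrived is processed before the buffered count is reported. A hypothetical caller using these entry points, assuming an SSL* connected elsewhere via SSL_new()/SSL_connect():

    #include <openssl/ssl.h>   /* yaSSL compatibility header */

    int drain_buffered(SSL* ssl, char* out, int outSz)
    {
        /* SSL_pending() now peeks one byte internally, so data already
           read off the socket is decrypted before the count is taken */
        if (SSL_pending(ssl) <= 0)
            return 0;

        char probe;
        if (SSL_peek(ssl, &probe, 1) <= 0)   /* look without consuming */
            return 0;

        return SSL_read(ssl, out, outSz);    /* now consume for real */
    }
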
diff --git a/extra/yassl/src/template_instnt.cpp b/extra/yassl/src/template_instnt.cpp
index ce8972c72fe..c5fc23dabdb 100644
--- a/extra/yassl/src/template_instnt.cpp
+++ b/extra/yassl/src/template_instnt.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/src/timer.cpp b/extra/yassl/src/timer.cpp
index 8b7d2d17a84..8500d09120b 100644
--- a/extra/yassl/src/timer.cpp
+++ b/extra/yassl/src/timer.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/src/yassl.cpp b/extra/yassl/src/yassl.cpp
index 86af12fd448..5bc8bad8bbc 100644
--- a/extra/yassl/src/yassl.cpp
+++ b/extra/yassl/src/yassl.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/src/yassl_error.cpp b/extra/yassl/src/yassl_error.cpp
index 72b8e459241..4f75de34a98 100644
--- a/extra/yassl/src/yassl_error.cpp
+++ b/extra/yassl/src/yassl_error.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/src/yassl_imp.cpp b/extra/yassl/src/yassl_imp.cpp
index 4d6d1fc7aff..310e8819c54 100644
--- a/extra/yassl/src/yassl_imp.cpp
+++ b/extra/yassl/src/yassl_imp.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -428,7 +432,7 @@ opaque* DH_Server::get_serverKey() const
// set available suites
Parameters::Parameters(ConnectionEnd ce, const Ciphers& ciphers,
- ProtocolVersion pv) : entity_(ce)
+ ProtocolVersion pv, bool haveDH) : entity_(ce)
{
pending_ = true; // suite not set yet
@@ -438,11 +442,11 @@ Parameters::Parameters(ConnectionEnd ce, const Ciphers& ciphers,
SetCipherNames();
}
else
- SetSuites(pv); // defaults
+ SetSuites(pv, ce == server_end && !haveDH); // defaults
}
-void Parameters::SetSuites(ProtocolVersion pv)
+void Parameters::SetSuites(ProtocolVersion pv, bool removeDH)
{
int i = 0;
// available suites, best first
@@ -450,19 +454,23 @@ void Parameters::SetSuites(ProtocolVersion pv)
// MAX_CIPHERS is big enough
if (isTLS(pv)) {
+ if (!removeDH) {
suites_[i++] = 0x00;
suites_[i++] = TLS_DHE_RSA_WITH_AES_256_CBC_SHA;
suites_[i++] = 0x00;
suites_[i++] = TLS_DHE_DSS_WITH_AES_256_CBC_SHA;
+ }
suites_[i++] = 0x00;
suites_[i++] = TLS_RSA_WITH_AES_256_CBC_SHA;
- suites_[i++] = 0x00;
- suites_[i++] = TLS_RSA_WITH_AES_128_CBC_SHA;
+ if (!removeDH) {
suites_[i++] = 0x00;
suites_[i++] = TLS_DHE_RSA_WITH_AES_128_CBC_SHA;
suites_[i++] = 0x00;
suites_[i++] = TLS_DHE_DSS_WITH_AES_128_CBC_SHA;
+ }
+ suites_[i++] = 0x00;
+ suites_[i++] = TLS_RSA_WITH_AES_128_CBC_SHA;
suites_[i++] = 0x00;
suites_[i++] = TLS_RSA_WITH_AES_256_CBC_RMD160;
@@ -471,6 +479,7 @@ void Parameters::SetSuites(ProtocolVersion pv)
suites_[i++] = 0x00;
suites_[i++] = TLS_RSA_WITH_3DES_EDE_CBC_RMD160;
+ if (!removeDH) {
suites_[i++] = 0x00;
suites_[i++] = TLS_DHE_RSA_WITH_AES_256_CBC_RMD160;
suites_[i++] = 0x00;
@@ -485,6 +494,7 @@ void Parameters::SetSuites(ProtocolVersion pv)
suites_[i++] = 0x00;
suites_[i++] = TLS_DHE_DSS_WITH_3DES_EDE_CBC_RMD160;
}
+ }
suites_[i++] = 0x00;
suites_[i++] = SSL_RSA_WITH_RC4_128_SHA;
@@ -496,6 +506,7 @@ void Parameters::SetSuites(ProtocolVersion pv)
suites_[i++] = 0x00;
suites_[i++] = SSL_RSA_WITH_DES_CBC_SHA;
+ if (!removeDH) {
suites_[i++] = 0x00;
suites_[i++] = SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA;
suites_[i++] = 0x00;
@@ -505,6 +516,7 @@ void Parameters::SetSuites(ProtocolVersion pv)
suites_[i++] = SSL_DHE_RSA_WITH_DES_CBC_SHA;
suites_[i++] = 0x00;
suites_[i++] = SSL_DHE_DSS_WITH_DES_CBC_SHA;
+ }
suites_size_ = i;
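
SetSuites() now receives removeDH, which is set for a server that has no Diffie-Hellman parameters, so every DHE suite is dropped from the offer while the RSA suites keep their "best first" order. A standalone sketch of the same filtering idea (not yaSSL code; the suite names are illustrative):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Suite { std::string name; bool needsDH; };

    // Build a "best first" list, skipping ephemeral-DH suites when the
    // server has no DH parameters to offer (removeDH == !haveDH there).
    std::vector<std::string> set_suites(bool haveDH)
    {
        const Suite all[] = {
            { "DHE-RSA-AES256-SHA", true  },
            { "DHE-DSS-AES256-SHA", true  },
            { "AES256-SHA",         false },
            { "DHE-RSA-AES128-SHA", true  },
            { "AES128-SHA",         false },
            { "DES-CBC3-SHA",       false },
        };

        std::vector<std::string> out;
        for (const Suite& s : all)
            if (haveDH || !s.needsDH)
                out.push_back(s.name);
        return out;
    }

    int main()
    {
        for (const std::string& n : set_suites(false))
            std::printf("%s\n", n.c_str());   // only the RSA suites remain
        return 0;
    }
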
diff --git a/extra/yassl/src/yassl_int.cpp b/extra/yassl/src/yassl_int.cpp
index 1ff46903bfd..831942aaf69 100644
--- a/extra/yassl/src/yassl_int.cpp
+++ b/extra/yassl/src/yassl_int.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -260,7 +264,8 @@ const ClientKeyFactory& sslFactory::getClientKey() const
// extract context parameters and store
SSL::SSL(SSL_CTX* ctx)
: secure_(ctx->getMethod()->getVersion(), crypto_.use_random(),
- ctx->getMethod()->getSide(), ctx->GetCiphers(), ctx)
+ ctx->getMethod()->getSide(), ctx->GetCiphers(), ctx,
+ ctx->GetDH_Parms().set_)
{
if (int err = crypto_.get_random().GetError()) {
SetError(YasslError(err));
@@ -986,6 +991,36 @@ void SSL::fillData(Data& data)
}
+// like Fill but keep data in buffer
+void SSL::PeekData(Data& data)
+{
+ if (GetError()) return;
+ uint dataSz = data.get_length(); // input, data size to fill
+ uint elements = buffers_.getData().size();
+
+ data.set_length(0); // output, actual data filled
+ dataSz = min(dataSz, bufferedData());
+
+ Buffers::inputList::iterator front = buffers_.getData().begin();
+
+ while (elements) {
+ uint frontSz = (*front)->get_remaining();
+ uint readSz = min(dataSz - data.get_length(), frontSz);
+ uint before = (*front)->get_current();
+
+ (*front)->read(data.set_buffer() + data.get_length(), readSz);
+ data.set_length(data.get_length() + readSz);
+ (*front)->set_current(before);
+
+ if (data.get_length() == dataSz)
+ break;
+
+ elements--;
+ front++;
+ }
+}
+
+
// flush output buffer
void SSL::flushBuffer()
{
@@ -1910,9 +1945,9 @@ Buffers::outputList& Buffers::useHandShake()
Security::Security(ProtocolVersion pv, RandomPool& ran, ConnectionEnd ce,
- const Ciphers& ciphers, SSL_CTX* ctx)
- : conn_(pv, ran), parms_(ce, ciphers, pv), resumeSession_(ran), ctx_(ctx),
- resuming_(false)
+ const Ciphers& ciphers, SSL_CTX* ctx, bool haveDH)
+ : conn_(pv, ran), parms_(ce, ciphers, pv, haveDH), resumeSession_(ran),
+ ctx_(ctx), resuming_(false)
{}
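
SSL::PeekData() mirrors fillData() but saves each input buffer's read position before copying and restores it afterwards, so the data stays available for a later destructive read; making the Buffers typedefs public lets the method name the iterator type. A standalone sketch of this save-and-restore peek over positioned buffers (not yaSSL code):

    #include <cstdio>
    #include <cstring>
    #include <list>
    #include <vector>

    struct InBuf {
        std::vector<char> data;
        size_t current = 0;                       // read position
        size_t remaining() const { return data.size() - current; }
        size_t read(char* out, size_t n) {
            n = n < remaining() ? n : remaining();
            std::memcpy(out, data.data() + current, n);
            current += n;                         // destructive by default
            return n;
        }
    };

    size_t peek(std::list<InBuf>& bufs, char* out, size_t want)
    {
        size_t got = 0;
        for (InBuf& b : bufs) {
            size_t before = b.current;            // save position
            got += b.read(out + got, want - got);
            b.current = before;                   // restore: nothing consumed
            if (got == want) break;
        }
        return got;
    }

    int main()
    {
        std::list<InBuf> bufs(1);
        const char msg[] = "hello";
        bufs.front().data.assign(msg, msg + 5);

        char out[8] = {0};
        std::printf("peeked %zu: %s\n", peek(bufs, out, 5), out);
        std::printf("still buffered: %zu\n", bufs.front().remaining());
        return 0;
    }
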
diff --git a/extra/yassl/taocrypt/include/aes.hpp b/extra/yassl/taocrypt/include/aes.hpp
index e2c1a34b0e3..cb70f5c0e7e 100644
--- a/extra/yassl/taocrypt/include/aes.hpp
+++ b/extra/yassl/taocrypt/include/aes.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/algebra.hpp b/extra/yassl/taocrypt/include/algebra.hpp
index a09ac8dce16..07fc405f093 100644
--- a/extra/yassl/taocrypt/include/algebra.hpp
+++ b/extra/yassl/taocrypt/include/algebra.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/arc4.hpp b/extra/yassl/taocrypt/include/arc4.hpp
index c37b89fb294..05b0921f5a1 100644
--- a/extra/yassl/taocrypt/include/arc4.hpp
+++ b/extra/yassl/taocrypt/include/arc4.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/asn.hpp b/extra/yassl/taocrypt/include/asn.hpp
index da4c0ce1349..90bc46a59fd 100644
--- a/extra/yassl/taocrypt/include/asn.hpp
+++ b/extra/yassl/taocrypt/include/asn.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/block.hpp b/extra/yassl/taocrypt/include/block.hpp
index 76836615ce6..88cb06f62f1 100644
--- a/extra/yassl/taocrypt/include/block.hpp
+++ b/extra/yassl/taocrypt/include/block.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/blowfish.hpp b/extra/yassl/taocrypt/include/blowfish.hpp
index 7d794a37329..fbc4f223702 100644
--- a/extra/yassl/taocrypt/include/blowfish.hpp
+++ b/extra/yassl/taocrypt/include/blowfish.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/coding.hpp b/extra/yassl/taocrypt/include/coding.hpp
index 9aab2a30c7e..da6771a5bea 100644
--- a/extra/yassl/taocrypt/include/coding.hpp
+++ b/extra/yassl/taocrypt/include/coding.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/des.hpp b/extra/yassl/taocrypt/include/des.hpp
index e0867b09166..48bb1e9119d 100644
--- a/extra/yassl/taocrypt/include/des.hpp
+++ b/extra/yassl/taocrypt/include/des.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/dh.hpp b/extra/yassl/taocrypt/include/dh.hpp
index 75a5d6280d3..869c3edf5b3 100644
--- a/extra/yassl/taocrypt/include/dh.hpp
+++ b/extra/yassl/taocrypt/include/dh.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/dsa.hpp b/extra/yassl/taocrypt/include/dsa.hpp
index f1fdd2dfa25..a5ba416382d 100644
--- a/extra/yassl/taocrypt/include/dsa.hpp
+++ b/extra/yassl/taocrypt/include/dsa.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/error.hpp b/extra/yassl/taocrypt/include/error.hpp
index 55ab39313f5..b0ff9b280ba 100644
--- a/extra/yassl/taocrypt/include/error.hpp
+++ b/extra/yassl/taocrypt/include/error.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/file.hpp b/extra/yassl/taocrypt/include/file.hpp
index fe7e1073c99..87fc6139f8f 100644
--- a/extra/yassl/taocrypt/include/file.hpp
+++ b/extra/yassl/taocrypt/include/file.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/hash.hpp b/extra/yassl/taocrypt/include/hash.hpp
index 16112cb644d..e3030088e0e 100644
--- a/extra/yassl/taocrypt/include/hash.hpp
+++ b/extra/yassl/taocrypt/include/hash.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/hmac.hpp b/extra/yassl/taocrypt/include/hmac.hpp
index 543366afc3a..d46a6d4e7c3 100644
--- a/extra/yassl/taocrypt/include/hmac.hpp
+++ b/extra/yassl/taocrypt/include/hmac.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/integer.hpp b/extra/yassl/taocrypt/include/integer.hpp
index ee83906cfbc..7e4f6450316 100644
--- a/extra/yassl/taocrypt/include/integer.hpp
+++ b/extra/yassl/taocrypt/include/integer.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/kernelc.hpp b/extra/yassl/taocrypt/include/kernelc.hpp
index bb74c10ad07..317359553e4 100644
--- a/extra/yassl/taocrypt/include/kernelc.hpp
+++ b/extra/yassl/taocrypt/include/kernelc.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/md2.hpp b/extra/yassl/taocrypt/include/md2.hpp
index 1e1c17e9cbe..89856adffe9 100644
--- a/extra/yassl/taocrypt/include/md2.hpp
+++ b/extra/yassl/taocrypt/include/md2.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/md4.hpp b/extra/yassl/taocrypt/include/md4.hpp
index aac930d7498..d2da7e35e4d 100644
--- a/extra/yassl/taocrypt/include/md4.hpp
+++ b/extra/yassl/taocrypt/include/md4.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/md5.hpp b/extra/yassl/taocrypt/include/md5.hpp
index 981f29108fe..30d14d54fbf 100644
--- a/extra/yassl/taocrypt/include/md5.hpp
+++ b/extra/yassl/taocrypt/include/md5.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/misc.hpp b/extra/yassl/taocrypt/include/misc.hpp
index 0808d76ccdf..48604620706 100644
--- a/extra/yassl/taocrypt/include/misc.hpp
+++ b/extra/yassl/taocrypt/include/misc.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/modarith.hpp b/extra/yassl/taocrypt/include/modarith.hpp
index 66a841b05c3..47b91560657 100644
--- a/extra/yassl/taocrypt/include/modarith.hpp
+++ b/extra/yassl/taocrypt/include/modarith.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/modes.hpp b/extra/yassl/taocrypt/include/modes.hpp
index 10f336c00eb..65b7318661e 100644
--- a/extra/yassl/taocrypt/include/modes.hpp
+++ b/extra/yassl/taocrypt/include/modes.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/pwdbased.hpp b/extra/yassl/taocrypt/include/pwdbased.hpp
index f40c48fe026..c3e916e3d83 100644
--- a/extra/yassl/taocrypt/include/pwdbased.hpp
+++ b/extra/yassl/taocrypt/include/pwdbased.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/random.hpp b/extra/yassl/taocrypt/include/random.hpp
index 62f49a8716f..628faaf116c 100644
--- a/extra/yassl/taocrypt/include/random.hpp
+++ b/extra/yassl/taocrypt/include/random.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/ripemd.hpp b/extra/yassl/taocrypt/include/ripemd.hpp
index b72e503f095..2e594b7604d 100644
--- a/extra/yassl/taocrypt/include/ripemd.hpp
+++ b/extra/yassl/taocrypt/include/ripemd.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/rsa.hpp b/extra/yassl/taocrypt/include/rsa.hpp
index e327fdd20ad..1b531b9d0c0 100644
--- a/extra/yassl/taocrypt/include/rsa.hpp
+++ b/extra/yassl/taocrypt/include/rsa.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/runtime.hpp b/extra/yassl/taocrypt/include/runtime.hpp
index 88559cb0ca0..3a72b2917f8 100644
--- a/extra/yassl/taocrypt/include/runtime.hpp
+++ b/extra/yassl/taocrypt/include/runtime.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/sha.hpp b/extra/yassl/taocrypt/include/sha.hpp
index 3e301a6f0ae..2d65932dc17 100644
--- a/extra/yassl/taocrypt/include/sha.hpp
+++ b/extra/yassl/taocrypt/include/sha.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/twofish.hpp b/extra/yassl/taocrypt/include/twofish.hpp
index 8605221854f..0529c37d6c5 100644
--- a/extra/yassl/taocrypt/include/twofish.hpp
+++ b/extra/yassl/taocrypt/include/twofish.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/type_traits.hpp b/extra/yassl/taocrypt/include/type_traits.hpp
index d03ccae64ed..b985358e1c1 100644
--- a/extra/yassl/taocrypt/include/type_traits.hpp
+++ b/extra/yassl/taocrypt/include/type_traits.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/include/types.hpp b/extra/yassl/taocrypt/include/types.hpp
index a2453a994fb..9c3fa8f64f2 100644
--- a/extra/yassl/taocrypt/include/types.hpp
+++ b/extra/yassl/taocrypt/include/types.hpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/aes.cpp b/extra/yassl/taocrypt/src/aes.cpp
index e737af33df3..574a88a736c 100644
--- a/extra/yassl/taocrypt/src/aes.cpp
+++ b/extra/yassl/taocrypt/src/aes.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/aestables.cpp b/extra/yassl/taocrypt/src/aestables.cpp
index af9924703ef..4118715fd0c 100644
--- a/extra/yassl/taocrypt/src/aestables.cpp
+++ b/extra/yassl/taocrypt/src/aestables.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/algebra.cpp b/extra/yassl/taocrypt/src/algebra.cpp
index 9c485609dd0..e9bc3fceac0 100644
--- a/extra/yassl/taocrypt/src/algebra.cpp
+++ b/extra/yassl/taocrypt/src/algebra.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/arc4.cpp b/extra/yassl/taocrypt/src/arc4.cpp
index 6d1c4d4e3a6..ea1e084014c 100644
--- a/extra/yassl/taocrypt/src/arc4.cpp
+++ b/extra/yassl/taocrypt/src/arc4.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/asn.cpp b/extra/yassl/taocrypt/src/asn.cpp
index 383fe65dea6..beb5490bb66 100644
--- a/extra/yassl/taocrypt/src/asn.cpp
+++ b/extra/yassl/taocrypt/src/asn.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -472,16 +476,7 @@ void CertDecoder::Decode(SignerList* signers, CertType ct)
return;
}
- if (ct == CA) {
- if ( memcmp(issuerHash_, subjectHash_, SHA::DIGEST_SIZE) == 0 ) {
- if (!ValidateSelfSignature() && verify_)
- source_.SetError(SIG_CONFIRM_E);
- }
- else
- if (!ValidateSignature(signers) && verify_)
- source_.SetError(SIG_OTHER_E);
- }
- else if (!ValidateSignature(signers) && verify_)
+ if (ct != CA && verify_ && !ValidateSignature(signers))
source_.SetError(SIG_OTHER_E);
}
diff --git a/extra/yassl/taocrypt/src/bftables.cpp b/extra/yassl/taocrypt/src/bftables.cpp
index e072b117f54..2dbb6c01674 100644
--- a/extra/yassl/taocrypt/src/bftables.cpp
+++ b/extra/yassl/taocrypt/src/bftables.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/blowfish.cpp b/extra/yassl/taocrypt/src/blowfish.cpp
index 16e2277dc10..cc929cd7d41 100644
--- a/extra/yassl/taocrypt/src/blowfish.cpp
+++ b/extra/yassl/taocrypt/src/blowfish.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/coding.cpp b/extra/yassl/taocrypt/src/coding.cpp
index 01ea399df13..b8bbd29bb2b 100644
--- a/extra/yassl/taocrypt/src/coding.cpp
+++ b/extra/yassl/taocrypt/src/coding.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/des.cpp b/extra/yassl/taocrypt/src/des.cpp
index d2db4fc939e..054c8c2eb78 100644
--- a/extra/yassl/taocrypt/src/des.cpp
+++ b/extra/yassl/taocrypt/src/des.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/dh.cpp b/extra/yassl/taocrypt/src/dh.cpp
index 44934394343..aec7122b70b 100644
--- a/extra/yassl/taocrypt/src/dh.cpp
+++ b/extra/yassl/taocrypt/src/dh.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/dsa.cpp b/extra/yassl/taocrypt/src/dsa.cpp
index 4574fe5ebd8..a132c7339f5 100644
--- a/extra/yassl/taocrypt/src/dsa.cpp
+++ b/extra/yassl/taocrypt/src/dsa.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/file.cpp b/extra/yassl/taocrypt/src/file.cpp
index 4d48b9e7bca..7c59af09708 100644
--- a/extra/yassl/taocrypt/src/file.cpp
+++ b/extra/yassl/taocrypt/src/file.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/hash.cpp b/extra/yassl/taocrypt/src/hash.cpp
index 4e783e2c3b1..fbbdf0c8c31 100644
--- a/extra/yassl/taocrypt/src/hash.cpp
+++ b/extra/yassl/taocrypt/src/hash.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/integer.cpp b/extra/yassl/taocrypt/src/integer.cpp
index a296e122985..823c0c5e193 100644
--- a/extra/yassl/taocrypt/src/integer.cpp
+++ b/extra/yassl/taocrypt/src/integer.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/md2.cpp b/extra/yassl/taocrypt/src/md2.cpp
index 16834b6bc89..aeadbdfb367 100644
--- a/extra/yassl/taocrypt/src/md2.cpp
+++ b/extra/yassl/taocrypt/src/md2.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/md4.cpp b/extra/yassl/taocrypt/src/md4.cpp
index dfc2b079141..6012330cba3 100644
--- a/extra/yassl/taocrypt/src/md4.cpp
+++ b/extra/yassl/taocrypt/src/md4.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/md5.cpp b/extra/yassl/taocrypt/src/md5.cpp
index 3d64ff8a8a1..f7b0b1ee2dc 100644
--- a/extra/yassl/taocrypt/src/md5.cpp
+++ b/extra/yassl/taocrypt/src/md5.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/misc.cpp b/extra/yassl/taocrypt/src/misc.cpp
index 2869df71c8a..b8095334789 100644
--- a/extra/yassl/taocrypt/src/misc.cpp
+++ b/extra/yassl/taocrypt/src/misc.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/random.cpp b/extra/yassl/taocrypt/src/random.cpp
index 945a7fa6ff7..2ee1e57a663 100644
--- a/extra/yassl/taocrypt/src/random.cpp
+++ b/extra/yassl/taocrypt/src/random.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/ripemd.cpp b/extra/yassl/taocrypt/src/ripemd.cpp
index da96b6cc1b4..c791189544f 100644
--- a/extra/yassl/taocrypt/src/ripemd.cpp
+++ b/extra/yassl/taocrypt/src/ripemd.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/rsa.cpp b/extra/yassl/taocrypt/src/rsa.cpp
index 021fd992b5c..b280c48f1af 100644
--- a/extra/yassl/taocrypt/src/rsa.cpp
+++ b/extra/yassl/taocrypt/src/rsa.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/sha.cpp b/extra/yassl/taocrypt/src/sha.cpp
index 12f80c1af75..b877e2b7857 100644
--- a/extra/yassl/taocrypt/src/sha.cpp
+++ b/extra/yassl/taocrypt/src/sha.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/template_instnt.cpp b/extra/yassl/taocrypt/src/template_instnt.cpp
index 12bcd8238f2..512c5cf9dce 100644
--- a/extra/yassl/taocrypt/src/template_instnt.cpp
+++ b/extra/yassl/taocrypt/src/template_instnt.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/tftables.cpp b/extra/yassl/taocrypt/src/tftables.cpp
index 55846d5f79d..7d7f2e98ac3 100644
--- a/extra/yassl/taocrypt/src/tftables.cpp
+++ b/extra/yassl/taocrypt/src/tftables.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/taocrypt/src/twofish.cpp b/extra/yassl/taocrypt/src/twofish.cpp
index 8b896ad5dc4..a16a8f0d169 100644
--- a/extra/yassl/taocrypt/src/twofish.cpp
+++ b/extra/yassl/taocrypt/src/twofish.cpp
@@ -9,6 +9,10 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
+ * There are special exceptions to the terms and conditions of the GPL as it
+ * is applied to yaSSL. View the full text of the exception in the file
+ * FLOSS-EXCEPTIONS in the directory of this software distribution.
+ *
* yaSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
diff --git a/extra/yassl/yassl.dsp b/extra/yassl/yassl.dsp
index dc090512743..58c016da448 100644
--- a/extra/yassl/yassl.dsp
+++ b/extra/yassl/yassl.dsp
@@ -40,8 +40,8 @@ RSC=rc.exe
# PROP Output_Dir "Release"
# PROP Intermediate_Dir "Release"
# PROP Target_Dir ""
-# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_MBCS" /D "_LIB" /YX /FD /c
-# ADD CPP /nologo /MT /W3 /O2 /I "include" /I "taocrypt\include" /I "mySTL" /D "WIN32" /D "NDEBUG" /D "_MBCS" /D "_LIB" /YX /FD /c
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_MBCS" /D "_LIB" /D "YASSL_PREFIX" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /O2 /I "include" /I "taocrypt\include" /I "mySTL" /D "WIN32" /D "NDEBUG" /D "_MBCS" /D "_LIB" /D "YASSL_PREFIX" /YX /FD /c
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
@@ -63,8 +63,8 @@ LIB32=link.exe -lib
# PROP Output_Dir "Debug"
# PROP Intermediate_Dir "Debug"
# PROP Target_Dir ""
-# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_MBCS" /D "_LIB" /YX /FD /GZ /c
-# ADD CPP /nologo /MTd /W3 /Gm /ZI /Od /I "include" /I "taocrypt\include" /I "mySTL" /D "WIN32" /D "_DEBUG" /D "_MBCS" /D "_LIB" /FR /YX /FD /GZ /c
+# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_MBCS" /D "_LIB" /D "YASSL_PREFIX" /YX /FD /GZ /c
+# ADD CPP /nologo /MTd /W3 /Gm /ZI /Od /I "include" /I "taocrypt\include" /I "mySTL" /D "WIN32" /D "_DEBUG" /D "_MBCS" /D "_LIB" /D "YASSL_PREFIX" /FR /YX /FD /GZ /c
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG"
BSC32=bscmake.exe
diff --git a/include/my_base.h b/include/my_base.h
index 728b2ab2a1b..d75df093a11 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -21,7 +21,6 @@
#define _my_base_h
#ifndef stdin /* Included first in handler */
-#define USES_TYPES /* my_dir with sys/types is included */
#define CHSIZE_USED
#include <my_global.h>
#include <my_dir.h> /* This includes types */
@@ -156,7 +155,16 @@ enum ha_extra_function {
Mark the table as a log table. For some handlers (e.g. CSV) this results
in a special locking for the table.
*/
- HA_EXTRA_MARK_AS_LOG_TABLE
+ HA_EXTRA_MARK_AS_LOG_TABLE,
+ /*
+    Informs the handler that a write_row() which finds an existing row with
+    the same primary/unique key may replace the old row with the new one
+    instead of reporting a duplicate-key error (basically, it tells the
+    handler that we are doing a REPLACE rather than a plain INSERT).
+    Off by default.
+ */
+ HA_EXTRA_WRITE_CAN_REPLACE,
+ HA_EXTRA_WRITE_CANNOT_REPLACE
};
/* The following is parameter to ha_panic() */
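Note: the two new HA_EXTRA flags are hints the SQL layer can pass to a storage engine through handler::extra(). A minimal hedged sketch of how a caller might bracket a REPLACE-style write with them; the helper function is hypothetical, only the flags and handler::extra() come from the tree.

// Hypothetical sketch (not part of this patch), assuming the usual handler
// API from sql/handler.h where extra() takes an enum ha_extra_function.
#include "mysql_priv.h"

void mark_write_semantics(handler *file, bool is_replace)
{
  // Tell the engine whether write_row() may silently overwrite a row with a
  // duplicate primary/unique key (REPLACE) or must report it as an error.
  file->extra(is_replace ? HA_EXTRA_WRITE_CAN_REPLACE
                         : HA_EXTRA_WRITE_CANNOT_REPLACE);
}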
diff --git a/include/my_global.h b/include/my_global.h
index 21b2251af1d..539a2ea644f 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -1159,8 +1159,8 @@ do { doubleget_union _tmp; \
#define doublestore(T,V) do { *((long *) T) = ((doubleget_union *)&V)->m[0]; \
*(((long *) T)+1) = ((doubleget_union *)&V)->m[1]; \
} while (0)
-#define float4get(V,M) do { *((long *) &(V)) = *((long*) (M)); } while(0)
-#define float8get(V,M) doubleget((V),(M))
+#define float4get(V,M) do { *((float *) &(V)) = *((float*) (M)); } while(0)
+#define float8get(V,M) doubleget((V),(M))
#define float4store(V,M) memcpy((byte*) V,(byte*) (&M),sizeof(float))
#define floatstore(T,V) memcpy((byte*)(T), (byte*)(&V),sizeof(float))
#define floatget(V,M) memcpy((byte*) &V,(byte*) (M),sizeof(float))
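Note: the float4get() fix matters because the old definition copied through a long*, which reads and writes sizeof(long) bytes; on LP64 platforms that is 8 bytes for a 4-byte field. A small illustrative round trip, assuming my_global.h is on the include path:

/* Illustrative only: store a float into a 4-byte record field and read it
   back with the corrected macro. */
#include <my_global.h>

float roundtrip_float4(float value)
{
  char field[4];               /* stands in for a 4-byte column in a record */
  float out;
  float4store(field, value);   /* memcpy()s exactly sizeof(float) bytes     */
  float4get(out, field);       /* now dereferences a float*, not a long*    */
  return out;                  /* equal to 'value'                          */
}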
diff --git a/include/my_handler.h b/include/my_handler.h
index d531e0fb3e1..61665090853 100644
--- a/include/my_handler.h
+++ b/include/my_handler.h
@@ -18,7 +18,6 @@
#ifndef _my_handler_h
#define _my_handler_h
-#include "my_global.h"
#include "my_base.h"
#include "m_ctype.h"
#include "myisampack.h"
diff --git a/include/my_sys.h b/include/my_sys.h
index 2dc4053f70d..4b31f6bcd2b 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -163,7 +163,7 @@ extern gptr my_realloc(gptr oldpoint,uint Size,myf MyFlags);
extern void my_no_flags_free(gptr ptr);
extern gptr my_memdup(const byte *from,uint length,myf MyFlags);
extern char *my_strdup(const char *from,myf MyFlags);
-extern char *my_strndup(const byte *from, uint length,
+extern char *my_strndup(const char *from, uint length,
myf MyFlags);
/* we do use FG (as a no-op) in below so that a typo on FG is caught */
#define my_free(PTR,FG) ((void)FG,my_no_flags_free(PTR))
@@ -587,7 +587,7 @@ extern gptr _my_memdup(const byte *from,uint length,
const char *sFile, uint uLine,myf MyFlag);
extern my_string _my_strdup(const char *from, const char *sFile, uint uLine,
myf MyFlag);
-extern char *_my_strndup(const byte *from, uint length,
+extern char *_my_strndup(const char *from, uint length,
const char *sFile, uint uLine,
myf MyFlag);
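Note: with the corrected prototype, callers duplicating part of an ordinary C string no longer need a cast to byte*. A trivial hedged example; the helper name is invented for illustration.

#include <my_global.h>
#include <my_sys.h>

/* Copy the first 'len' characters of a NUL-terminated string. */
char *dup_prefix(const char *name, uint len)
{
  return my_strndup(name, len, MYF(MY_WME));  /* no (byte*) cast needed now */
}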
diff --git a/include/mysql.h b/include/mysql.h
index 3a71e47f414..7ed205024e2 100644
--- a/include/mysql.h
+++ b/include/mysql.h
@@ -236,6 +236,7 @@ typedef struct character_set
} MY_CHARSET_INFO;
struct st_mysql_methods;
+struct st_mysql_stmt;
typedef struct st_mysql
{
@@ -293,6 +294,12 @@ typedef struct st_mysql
/* needed for embedded server - no net buffer to store the 'info' */
char *info_buffer;
#endif
+ /*
+    In the embedded server this points to the statement being processed
+    in the current query; some results are then stored directly in the
+    statement's fields.
+ */
+ struct st_mysql_stmt *current_stmt;
} MYSQL;
typedef struct st_mysql_res {
@@ -745,7 +752,8 @@ typedef struct st_mysql_methods
unsigned long header_length,
const char *arg,
unsigned long arg_length,
- my_bool skip_check);
+ my_bool skip_check,
+ MYSQL_STMT *stmt);
MYSQL_DATA *(*read_rows)(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
unsigned int fields);
MYSQL_RES * (*use_result)(MYSQL *mysql);
@@ -835,8 +843,11 @@ int STDCALL mysql_drop_db(MYSQL *mysql, const char *DB);
*/
#define simple_command(mysql, command, arg, length, skip_check) \
- (*(mysql)->methods->advanced_command)(mysql, command, \
- NullS, 0, arg, length, skip_check)
+ (*(mysql)->methods->advanced_command)(mysql, command, NullS, \
+ 0, arg, length, skip_check, NULL)
+#define stmt_command(mysql, command, arg, length, stmt) \
+ (*(mysql)->methods->advanced_command)(mysql, command, NullS, \
+ 0, arg, length, 1, stmt)
unsigned long net_safe_read(MYSQL* mysql);
#ifdef __NETWARE__
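Note: the extra MYSQL_STMT argument tells the (embedded) server which statement a command belongs to, so results can be stored straight into the statement's fields; the new stmt_command macro is the per-statement twin of simple_command. A hedged sketch mirroring how mysql_stmt_close() uses it later in this patch:

/* Sketch only; assumes the client-library internals from this tree
   (int4store, MYSQL_STMT::stmt_id, COM_STMT_CLOSE). */
#include <my_global.h>
#include <mysql.h>

static my_bool send_stmt_close(MYSQL *mysql, MYSQL_STMT *stmt)
{
  char buff[4];                               /* just the statement id */
  int4store(buff, stmt->stmt_id);
  /* Forwards 'stmt' to (*methods->advanced_command)() with skip_check= 1. */
  return stmt_command(mysql, COM_STMT_CLOSE, buff, sizeof(buff), stmt);
}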
diff --git a/include/sql_common.h b/include/sql_common.h
index c07a4a831bb..7ea8b6c87e0 100644
--- a/include/sql_common.h
+++ b/include/sql_common.h
@@ -22,6 +22,7 @@ extern const char *not_error_sqlstate;
extern "C" {
#endif
+extern CHARSET_INFO *default_client_charset_info;
MYSQL_FIELD *unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
my_bool default_value, uint server_capabilities);
void free_rows(MYSQL_DATA *cur);
@@ -33,7 +34,8 @@ void mysql_read_default_options(struct st_mysql_options *options,
my_bool
cli_advanced_command(MYSQL *mysql, enum enum_server_command command,
const char *header, ulong header_length,
- const char *arg, ulong arg_length, my_bool skip_check);
+ const char *arg, ulong arg_length, my_bool skip_check,
+ MYSQL_STMT *stmt);
void set_stmt_errmsg(MYSQL_STMT * stmt, const char *err, int errcode,
const char *sqlstate);
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index 225b3926aa7..c2e7df6f208 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -2055,7 +2055,7 @@ mysql_stmt_prepare(MYSQL_STMT *stmt, const char *query, ulong length)
mysql_use_result it won't be freed in mysql_stmt_free_result and
we should get 'Commands out of sync' here.
*/
- if (simple_command(mysql, COM_STMT_CLOSE, buff, 4, 1))
+ if (stmt_command(mysql, COM_STMT_CLOSE, buff, 4, stmt))
{
set_stmt_errmsg(stmt, mysql->net.last_error, mysql->net.last_errno,
mysql->net.sqlstate);
@@ -2064,7 +2064,7 @@ mysql_stmt_prepare(MYSQL_STMT *stmt, const char *query, ulong length)
stmt->state= MYSQL_STMT_INIT_DONE;
}
- if (simple_command(mysql, COM_STMT_PREPARE, query, length, 1))
+ if (stmt_command(mysql, COM_STMT_PREPARE, query, length, stmt))
{
set_stmt_errmsg(stmt, mysql->net.last_error, mysql->net.last_errno,
mysql->net.sqlstate);
@@ -2385,10 +2385,9 @@ static void net_store_datetime(NET *net, MYSQL_TIME *tm)
static void store_param_date(NET *net, MYSQL_BIND *param)
{
- MYSQL_TIME *tm= (MYSQL_TIME *) param->buffer;
- tm->hour= tm->minute= tm->second= 0;
- tm->second_part= 0;
- net_store_datetime(net, tm);
+ MYSQL_TIME tm= *((MYSQL_TIME *) param->buffer);
+ tm.hour= tm.minute= tm.second= tm.second_part= 0;
+ net_store_datetime(net, &tm);
}
static void store_param_datetime(NET *net, MYSQL_BIND *param)
@@ -2481,7 +2480,7 @@ static my_bool execute(MYSQL_STMT *stmt, char *packet, ulong length)
buff[4]= (char) stmt->flags;
int4store(buff+5, 1); /* iteration count */
if (cli_advanced_command(mysql, COM_STMT_EXECUTE, buff, sizeof(buff),
- packet, length, 1) ||
+ packet, length, 1, NULL) ||
(*mysql->methods->read_query_result)(mysql))
{
set_stmt_errmsg(stmt, net->last_error, net->last_errno, net->sqlstate);
@@ -2694,7 +2693,8 @@ stmt_read_row_from_cursor(MYSQL_STMT *stmt, unsigned char **row)
int4store(buff, stmt->stmt_id);
int4store(buff + 4, stmt->prefetch_rows); /* number of rows to fetch */
if ((*mysql->methods->advanced_command)(mysql, COM_STMT_FETCH,
- buff, sizeof(buff), NullS, 0, 1))
+ buff, sizeof(buff), NullS, 0,
+ 1, NULL))
{
set_stmt_errmsg(stmt, net->last_error, net->last_errno, net->sqlstate);
return 1;
@@ -3361,7 +3361,7 @@ mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number,
*/
if ((*mysql->methods->advanced_command)(mysql, COM_STMT_SEND_LONG_DATA,
buff, sizeof(buff), data,
- length, 1))
+ length, 1, NULL))
{
set_stmt_errmsg(stmt, mysql->net.last_error,
mysql->net.last_errno, mysql->net.sqlstate);
@@ -4749,7 +4749,7 @@ int STDCALL mysql_stmt_store_result(MYSQL_STMT *stmt)
int4store(buff, stmt->stmt_id);
int4store(buff + 4, (int)~0); /* number of rows to fetch */
if (cli_advanced_command(mysql, COM_STMT_FETCH, buff, sizeof(buff),
- NullS, 0, 1))
+ NullS, 0, 1, NULL))
{
set_stmt_errmsg(stmt, net->last_error, net->last_errno, net->sqlstate);
DBUG_RETURN(1);
@@ -4943,7 +4943,7 @@ static my_bool reset_stmt_handle(MYSQL_STMT *stmt, uint flags)
char buff[MYSQL_STMT_HEADER]; /* packet header: 4 bytes for stmt id */
int4store(buff, stmt->stmt_id);
if ((*mysql->methods->advanced_command)(mysql, COM_STMT_RESET, buff,
- sizeof(buff), 0, 0, 0))
+ sizeof(buff), 0, 0, 0, NULL))
{
set_stmt_errmsg(stmt, mysql->net.last_error, mysql->net.last_errno,
mysql->net.sqlstate);
@@ -5017,7 +5017,7 @@ my_bool STDCALL mysql_stmt_close(MYSQL_STMT *stmt)
mysql->status= MYSQL_STATUS_READY;
}
int4store(buff, stmt->stmt_id);
- if ((rc= simple_command(mysql, COM_STMT_CLOSE, buff, 4, 1)))
+ if ((rc= stmt_command(mysql, COM_STMT_CLOSE, buff, 4, stmt)))
{
set_stmt_errmsg(stmt, mysql->net.last_error, mysql->net.last_errno,
mysql->net.sqlstate);
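Note: the store_param_date() change is client-visible: the DATE parameter is now zeroed in a private copy of the bound MYSQL_TIME, so execution no longer wipes the hour/minute/second members in the caller's buffer. A hedged usage sketch; the prepared statement handle with a single DATE placeholder is assumed.

#include <mysql.h>
#include <string.h>

/* Returns 0 if the caller's MYSQL_TIME survived execution unchanged. */
static int bind_and_run_date(MYSQL_STMT *stmt)
{
  MYSQL_TIME when;
  MYSQL_BIND  bind[1];

  memset(&when, 0, sizeof(when));
  when.year= 2006; when.month= 7; when.day= 14;
  when.hour= 23;                       /* was reset to 0 in place before the fix */

  memset(bind, 0, sizeof(bind));
  bind[0].buffer_type= MYSQL_TYPE_DATE;
  bind[0].buffer= (char *) &when;

  if (mysql_stmt_bind_param(stmt, bind) || mysql_stmt_execute(stmt))
    return 1;                          /* error path, details omitted */
  return when.hour == 23 ? 0 : 1;      /* still 23: only a copy was zeroed */
}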
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index 69d8b6e0f67..210bad20024 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -28,7 +28,7 @@ ADD_LIBRARY(mysqldemb emb_qcache.cc libmysqld.c lib_sql.cc
../client/get_password.c ../libmysql/errmsg.c
../libmysql/libmysql.c ../sql/password.c ../sql-common/client.c
../sql-common/my_time.c ../sql-common/my_user.c
- ../sql-common/pack.c ../sql/derror.cc ../sql/event_executor.cc
+ ../sql-common/pack.c ../sql/derror.cc ../sql/event_scheduler.cc
../sql/event_timed.cc ../sql/events.cc ../sql/discover.cc
../sql/field_conv.cc ../sql/field.cc ../sql/filesort.cc
../sql/gstream.cc ../sql/ha_heap.cc ../sql/ha_myisam.cc
@@ -61,8 +61,9 @@ ADD_LIBRARY(mysqldemb emb_qcache.cc libmysqld.c lib_sql.cc
../sql/sql_update.cc ../sql/sql_view.cc
../sql/strfunc.cc ../sql/table.cc ../sql/thr_malloc.cc
../sql/time.cc ../sql/tztime.cc ../sql/uniques.cc ../sql/unireg.cc
- ../sql/partition_info.cc ../vio/vio.c
- ../vio/viosocket.c ../vio/viossl.c ../vio/viosslfactories.c
+ ../sql/partition_info.cc ../sql/sql_locale.cc
+ ../vio/vio.c ../vio/viosocket.c ../vio/viossl.c
+ ../vio/viosslfactories.c
${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
${PROJECT_SOURCE_DIR}/sql/sql_yacc.h)
diff --git a/libmysqld/Makefile.am b/libmysqld/Makefile.am
index b91944ac258..befbd3fad3a 100644
--- a/libmysqld/Makefile.am
+++ b/libmysqld/Makefile.am
@@ -56,7 +56,7 @@ sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
key.cc lock.cc log.cc log_event.cc sql_state.c \
protocol.cc net_serv.cc opt_range.cc \
opt_sum.cc procedure.cc records.cc sql_acl.cc \
- sql_load.cc discover.cc \
+ sql_load.cc discover.cc sql_locale.cc \
sql_analyse.cc sql_base.cc sql_cache.cc sql_class.cc \
sql_crypt.cc sql_db.cc sql_delete.cc sql_error.cc sql_insert.cc \
sql_lex.cc sql_list.cc sql_manager.cc sql_map.cc sql_parse.cc \
diff --git a/libmysqld/embedded_priv.h b/libmysqld/embedded_priv.h
index 88015340e8c..5ba6f34a2eb 100644
--- a/libmysqld/embedded_priv.h
+++ b/libmysqld/embedded_priv.h
@@ -18,9 +18,9 @@
C_MODE_START
void lib_connection_phase(NET *net, int phase);
-void init_embedded_mysql(MYSQL *mysql, int client_flag, char *db);
-void *create_embedded_thd(int client_flag, char *db);
-int check_embedded_connection(MYSQL *mysql);
+void init_embedded_mysql(MYSQL *mysql, int client_flag);
+void *create_embedded_thd(int client_flag);
+int check_embedded_connection(MYSQL *mysql, const char *db);
void free_old_query(MYSQL *mysql);
extern MYSQL_METHODS embedded_methods;
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index 9e763df8a0a..14dbe21fce0 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -37,6 +37,8 @@ extern "C"
int check_user(THD *thd, enum enum_server_command command,
const char *passwd, uint passwd_len, const char *db,
bool check_count);
+void thd_init_client_charset(THD *thd, uint cs_number);
+
C_MODE_START
#include <mysql.h>
@@ -77,7 +79,8 @@ void embedded_get_error(MYSQL *mysql, MYSQL_DATA *data)
static my_bool
emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
const char *header, ulong header_length,
- const char *arg, ulong arg_length, my_bool skip_check)
+ const char *arg, ulong arg_length, my_bool skip_check,
+ MYSQL_STMT *stmt)
{
my_bool result= 1;
THD *thd=(THD *) mysql->thd;
@@ -97,6 +100,7 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
mysql->affected_rows= ~(my_ulonglong) 0;
mysql->field_count= 0;
net->last_errno= 0;
+ mysql->current_stmt= stmt;
thd->store_globals(); // Fix if more than one connect
/*
@@ -282,7 +286,7 @@ static int emb_stmt_execute(MYSQL_STMT *stmt)
thd->client_param_count= stmt->param_count;
thd->client_params= stmt->params;
if (emb_advanced_command(stmt->mysql, COM_STMT_EXECUTE,0,0,
- header, sizeof(header), 1) ||
+ header, sizeof(header), 1, stmt) ||
emb_read_query_result(stmt->mysql))
{
NET *net= &stmt->mysql->net;
@@ -382,7 +386,6 @@ static MYSQL_RES * emb_store_result(MYSQL *mysql)
return mysql_store_result(mysql);
}
-
int emb_read_change_user_result(MYSQL *mysql,
char *buff __attribute__((unused)),
const char *passwd __attribute__((unused)))
@@ -552,7 +555,7 @@ void end_embedded_server()
}
-void init_embedded_mysql(MYSQL *mysql, int client_flag, char *db)
+void init_embedded_mysql(MYSQL *mysql, int client_flag)
{
THD *thd = (THD *)mysql->thd;
thd->mysql= mysql;
@@ -560,7 +563,7 @@ void init_embedded_mysql(MYSQL *mysql, int client_flag, char *db)
init_alloc_root(&mysql->field_alloc, 8192, 0);
}
-void *create_embedded_thd(int client_flag, char *db)
+void *create_embedded_thd(int client_flag)
{
THD * thd= new THD;
thd->thread_id= thread_id++;
@@ -586,8 +589,8 @@ void *create_embedded_thd(int client_flag, char *db)
thd->init_for_queries();
thd->client_capabilities= client_flag;
- thd->db= db;
- thd->db_length= db ? strip_sp(db) : 0;
+ thd->db= NULL;
+ thd->db_length= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
thd->security_ctx->db_access= DB_ACLS;
thd->security_ctx->master_access= ~NO_ACCESS;
@@ -604,22 +607,25 @@ err:
return NULL;
}
+
#ifdef NO_EMBEDDED_ACCESS_CHECKS
-int check_embedded_connection(MYSQL *mysql)
+int check_embedded_connection(MYSQL *mysql, const char *db)
{
int result;
THD *thd= (THD*)mysql->thd;
+ thd_init_client_charset(thd, mysql->charset->number);
+ thd->update_charset();
Security_context *sctx= thd->security_ctx;
sctx->host_or_ip= sctx->host= (char*) my_localhost;
strmake(sctx->priv_host, (char*) my_localhost, MAX_HOSTNAME-1);
sctx->priv_user= sctx->user= my_strdup(mysql->user, MYF(0));
- result= check_user(thd, COM_CONNECT, NULL, 0, thd->db, true);
+ result= check_user(thd, COM_CONNECT, NULL, 0, db, true);
emb_read_query_result(mysql);
return result;
}
#else
-int check_embedded_connection(MYSQL *mysql)
+int check_embedded_connection(MYSQL *mysql, const char *db)
{
THD *thd= (THD*)mysql->thd;
Security_context *sctx= thd->security_ctx;
@@ -627,6 +633,8 @@ int check_embedded_connection(MYSQL *mysql)
char scramble_buff[SCRAMBLE_LENGTH];
int passwd_len;
+ thd_init_client_charset(thd, mysql->charset->number);
+ thd->update_charset();
if (mysql->options.client_ip)
{
sctx->host= my_strdup(mysql->options.client_ip, MYF(0));
@@ -654,7 +662,7 @@ int check_embedded_connection(MYSQL *mysql)
passwd_len= 0;
if((result= check_user(thd, COM_CONNECT,
- scramble_buff, passwd_len, thd->db, true)))
+ scramble_buff, passwd_len, db, true)))
goto err;
return 0;
diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c
index cad1bd4c47b..cb4fa104b4c 100644
--- a/libmysqld/libmysqld.c
+++ b/libmysqld/libmysqld.c
@@ -90,56 +90,13 @@ static void end_server(MYSQL *mysql)
}
-static int mysql_init_charset(MYSQL *mysql)
-{
- char charset_name_buff[16], *charset_name;
-
- if ((charset_name=mysql->options.charset_name))
- {
- const char *save=charsets_dir;
- if (mysql->options.charset_dir)
- charsets_dir=mysql->options.charset_dir;
- mysql->charset=get_charset_by_name(mysql->options.charset_name,
- MYF(MY_WME));
- charsets_dir=save;
- }
- else if (mysql->server_language)
- {
- charset_name=charset_name_buff;
- sprintf(charset_name,"%d",mysql->server_language); /* In case of errors */
- mysql->charset=get_charset((uint8) mysql->server_language, MYF(MY_WME));
- }
- else
- mysql->charset=default_charset_info;
-
- if (!mysql->charset)
- {
- mysql->net.last_errno=CR_CANT_READ_CHARSET;
- strmov(mysql->net.sqlstate, "HY0000");
- if (mysql->options.charset_dir)
- sprintf(mysql->net.last_error,ER(mysql->net.last_errno),
- charset_name ? charset_name : "unknown",
- mysql->options.charset_dir);
- else
- {
- char cs_dir_name[FN_REFLEN];
- get_charsets_dir(cs_dir_name);
- sprintf(mysql->net.last_error,ER(mysql->net.last_errno),
- charset_name ? charset_name : "unknown",
- cs_dir_name);
- }
- return mysql->net.last_errno;
- }
- return 0;
-}
-
+int mysql_init_character_set(MYSQL *mysql);
MYSQL * STDCALL
mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
const char *passwd, const char *db,
uint port, const char *unix_socket,ulong client_flag)
{
- char *db_name;
char name_buff[USERNAME_LENGTH];
DBUG_ENTER("mysql_real_connect");
@@ -207,7 +164,6 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
port=0;
unix_socket=0;
- db_name = db ? my_strdup(db,MYF(MY_WME)) : NULL;
/* Send client information for access check */
client_flag|=CLIENT_CAPABILITIES;
@@ -218,14 +174,14 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
client_flag|=CLIENT_CONNECT_WITH_DB;
mysql->info_buffer= my_malloc(MYSQL_ERRMSG_SIZE, MYF(0));
- mysql->thd= create_embedded_thd(client_flag, db_name);
+ mysql->thd= create_embedded_thd(client_flag);
- init_embedded_mysql(mysql, client_flag, db_name);
+ init_embedded_mysql(mysql, client_flag);
- if (check_embedded_connection(mysql))
+ if (mysql_init_character_set(mysql))
goto error;
- if (mysql_init_charset(mysql))
+ if (check_embedded_connection(mysql, db))
goto error;
mysql->server_status= SERVER_STATUS_AUTOCOMMIT;
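Note: the net effect for the embedded library is that mysql_real_connect() now resolves the client character set (via the shared mysql_init_character_set()) before the access check runs, and the requested database is passed to check_embedded_connection() instead of being stashed in the THD up front. A hedged sketch of an embedded client relying on the new ordering; library/server initialisation arguments are omitted for brevity.

#include <mysql.h>
#include <stdio.h>

int connect_embedded(void)
{
  MYSQL *conn= mysql_init(NULL);
  if (!conn)
    return 1;
  /* Resolved by mysql_init_character_set() before the connection check,
     so the session starts out with this character set. */
  mysql_options(conn, MYSQL_SET_CHARSET_NAME, "utf8");
  if (!mysql_real_connect(conn, NULL, "root", NULL, "test", 0, NULL, 0))
  {
    fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
    mysql_close(conn);
    return 1;
  }
  mysql_close(conn);
  return 0;
}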
diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am
index 5aaddf36aa3..852ec463420 100644
--- a/mysql-test/Makefile.am
+++ b/mysql-test/Makefile.am
@@ -112,6 +112,7 @@ install-data-local:
uninstall-local:
@RM@ -f -r $(DESTDIR)$(testdir)
+
SUFFIXES = .sh
.sh:
diff --git a/mysql-test/extra/binlog_tests/binlog.test b/mysql-test/extra/binlog_tests/binlog.test
index 6f7990893f0..993b3fbf634 100644
--- a/mysql-test/extra/binlog_tests/binlog.test
+++ b/mysql-test/extra/binlog_tests/binlog.test
@@ -49,3 +49,35 @@ show binlog events in 'master-bin.000001' from 102;
--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events in 'master-bin.000002' from 102;
+
+# Test of a too big SET INSERT_ID: see if the truncated value goes
+# into binlog (right), or the too big value (wrong); we look at the
+# binlog further down with SHOW BINLOG EVENTS.
+reset master;
+create table t1 (id tinyint auto_increment primary key);
+set insert_id=128;
+insert into t1 values(null);
+select * from t1;
+drop table t1;
+
+# Test of binlogging of INSERT_ID with INSERT DELAYED
+create table t1 (a int not null auto_increment, primary key (a)) engine=myisam;
+# First, avoid BUG#20627:
+set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1;
+# Verify that only one INSERT_ID event is binlogged.
+insert delayed into t1 values (207);
+
+# We use sleeps between statements; that's the only way to get a
+# repeatable binlog in a normal test run and under Valgrind.
+# The "binlog missing rows" symptom of BUG#20821 may show up here.
+sleep 2;
+insert delayed into t1 values (null);
+sleep 2;
+insert delayed into t1 values (300);
+sleep 2; # time for the delayed queries to reach disk
+select * from t1;
+--replace_column 2 # 5 #
+--replace_regex /table_id: [0-9]+/table_id: #/
+show binlog events from 102;
+drop table t1;
diff --git a/mysql-test/extra/binlog_tests/blackhole.test b/mysql-test/extra/binlog_tests/blackhole.test
index 97243015aba..cad4380a374 100644
--- a/mysql-test/extra/binlog_tests/blackhole.test
+++ b/mysql-test/extra/binlog_tests/blackhole.test
@@ -129,7 +129,6 @@ show binlog events;
drop table t1,t2,t3;
# End of 4.1 tests
-
# Test that a transaction which is rolled back does not go into binlog
# and that a transaction which is committed does
@@ -147,15 +146,6 @@ set autocommit=1;
--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/
show binlog events;
-
-#
-# BUG#10952 - alter table ... lost data without errors and warnings
-#
drop table if exists t1;
-create table t1 (c char(20)) engine=MyISAM;
-insert into t1 values ("Monty"),("WAX"),("Walrus");
---error 1031
-alter table t1 engine=blackhole;
-drop table t1;
# End of 5.0 tests
diff --git a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
index b5052620f91..bdc49573ae5 100644
--- a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
+++ b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
@@ -29,7 +29,7 @@ insert into t1 values(1);
insert into t2 select * from t1;
commit;
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -43,7 +43,7 @@ insert into t2 select * from t1;
# should say some changes to non-transactional tables couldn't be rolled back
rollback;
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -59,7 +59,7 @@ insert into t2 select * from t1;
rollback to savepoint my_savepoint;
commit;
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -77,7 +77,7 @@ insert into t1 values(7);
commit;
select a from t1 order by a; # check that savepoints work :)
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -99,7 +99,7 @@ connection con2;
# so SHOW BINLOG EVENTS may come before con1 does the logging. To be sure that
# logging has been done, we use a user lock.
select get_lock("a",10);
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -111,7 +111,7 @@ reset master;
insert into t1 values(9);
insert into t2 select * from t1;
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -124,13 +124,13 @@ reset master;
insert into t1 values(10); # first make t1 non-empty
begin;
insert into t2 select * from t1;
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
insert into t1 values(11);
commit;
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -149,7 +149,7 @@ insert into t1 values(12);
insert into t2 select * from t1;
commit;
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -162,7 +162,7 @@ insert into t1 values(13);
insert into t2 select * from t1;
rollback;
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -178,7 +178,7 @@ insert into t2 select * from t1;
rollback to savepoint my_savepoint;
commit;
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -196,7 +196,7 @@ insert into t1 values(18);
commit;
select a from t1 order by a; # check that savepoints work :)
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
@@ -257,7 +257,7 @@ insert into t2 values (3);
disconnect con2;
connection con3;
select get_lock("lock1",60);
---replace_column 5 #
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
do release_lock("lock1");
@@ -324,6 +324,7 @@ CREATE TEMPORARY TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select *
ROLLBACK;
SELECT * from t2;
DROP TABLE t1,t2;
+--replace_column 2 # 5 #
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
show binlog events from 102;
diff --git a/mysql-test/extra/rpl_tests/rpl_auto_increment.test b/mysql-test/extra/rpl_tests/rpl_auto_increment.test
index 42f8fcfc5fb..dbae317e8ab 100644
--- a/mysql-test/extra/rpl_tests/rpl_auto_increment.test
+++ b/mysql-test/extra/rpl_tests/rpl_auto_increment.test
@@ -104,9 +104,47 @@ select * from t1;
sync_slave_with_master;
select * from t1;
-connection master;
+# Test for BUG#20524 "auto_increment_* not observed when inserting
+# a too large value". When an autogenerated value was bigger than the
+# maximum possible value of the field, it was truncated to that max
+# possible value, without being "rounded down" to still honour
+# auto_increment_* variables.
+
+connection master;
drop table t1;
+create table t1 (a tinyint not null auto_increment primary key) engine=myisam;
+insert into t1 values(103);
+set auto_increment_increment=11;
+set auto_increment_offset=4;
+insert into t1 values(null);
+insert into t1 values(null);
+--error 1062
+insert into t1 values(null);
+select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t1 order by a;
+
+# same but with a larger value
+create table t2 (a tinyint unsigned not null auto_increment primary key) engine=myisam;
+set auto_increment_increment=10;
+set auto_increment_offset=1;
+set insert_id=1000;
+insert into t2 values(null);
+select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t2 order by a;
+
+# An offset so big that even the first value does not fit
+create table t3 like t1;
+set auto_increment_increment=1000;
+set auto_increment_offset=700;
+insert into t3 values(null);
+select * from t3 order by a;
+sync_slave_with_master;
+select * from t1 order by a;
+select * from t2 order by a;
+select * from t3 order by a;
+
+connection master;
+
+drop table t1,t2,t3;
# End cleanup
sync_slave_with_master;
diff --git a/mysql-test/extra/rpl_tests/rpl_insert_id.test b/mysql-test/extra/rpl_tests/rpl_insert_id.test
index 68e39c54381..79f8c39e152 100644
--- a/mysql-test/extra/rpl_tests/rpl_insert_id.test
+++ b/mysql-test/extra/rpl_tests/rpl_insert_id.test
@@ -84,9 +84,28 @@ SET FOREIGN_KEY_CHECKS=0;
--error 1022, 1062
INSERT INTO t1 VALUES (1),(1);
sync_slave_with_master;
+connection master;
+drop table t1;
+sync_slave_with_master;
# End of 4.1 tests
-
+
+#
+# Bug#14553: NULL in WHERE resets LAST_INSERT_ID
+#
+connection master;
+create table t1(a int auto_increment, key(a));
+create table t2(a int);
+insert into t1 (a) values (null);
+insert into t2 (a) select a from t1 where a is null;
+insert into t2 (a) select a from t1 where a is null;
+select * from t2;
+sync_slave_with_master;
+connection slave;
+select * from t2;
+connection master;
+drop table t1;
+drop table t2;
#
# BUG#15728: LAST_INSERT_ID function inside a stored function returns 0
@@ -144,6 +163,23 @@ insert into t1 (last_id) values (bug15728());
# This should be exactly one greater than in the previous call.
select last_insert_id();
+# BUG#20339 - stored procedure using LAST_INSERT_ID() does not
+# replicate statement-based
+--disable_warnings
+drop procedure if exists foo;
+--enable_warnings
+delimiter |;
+create procedure foo()
+begin
+ declare res int;
+ insert into t2 (last_id) values (bug15728());
+ insert into t1 (last_id) values (bug15728());
+end|
+delimiter ;|
+call foo();
+
+select * from t1;
+select * from t2;
save_master_pos;
connection slave;
sync_with_master;
@@ -153,8 +189,108 @@ connection master;
drop function bug15728;
drop function bug15728_insert;
-drop table t1, t2;
+drop table t1;
+drop procedure foo;
+
+# test of BUG#20188 REPLACE or ON DUPLICATE KEY UPDATE in
+# auto_increment breaks binlog
+
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+
+# First, test that we do not call restore_auto_increment() too early
+# in write_record():
+set sql_log_bin=0;
+insert into t1 values(null,100);
+replace into t1 values(null,50),(null,100),(null,150);
+select * from t1 order by n;
+truncate table t1;
+set sql_log_bin=1;
+
+insert into t1 values(null,100);
+select * from t1 order by n;
+sync_slave_with_master;
+# make slave's table autoinc counter bigger
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+# check that slave's table content is identical to master
+select * from t1 order by n;
+# only the auto_inc counter differs.
+
+connection master;
+replace into t1 values(null,100),(null,350);
+select * from t1 order by n;
+sync_slave_with_master;
+select * from t1 order by n;
+
+# Same test as for REPLACE, but for ON DUPLICATE KEY UPDATE
+
+# We first check that if we update a row using a value larger than the
+# table's counter, the counter for the next row is bigger than the
+# after-value of the updated row.
+connection master;
+insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
+select * from t1 order by n;
+sync_slave_with_master;
+select * from t1 order by n;
+
+# and now test for the bug:
+connection master;
+drop table t1;
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+insert into t1 values(null,100);
+select * from t1 order by n;
+sync_slave_with_master;
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+select * from t1 order by n;
+
+connection master;
+insert into t1 values(null,100),(null,350) on duplicate key update n=2;
+select * from t1 order by n;
+sync_slave_with_master;
+select * from t1 order by n;
+
+connection master;
+drop table t1;
# End of 5.0 tests
+# Test for BUG#20341 "stored function inserting into one
+# auto_increment puts bad data in slave"
+
+truncate table t2;
+create table t1 (id tinyint primary key); # no auto_increment
+
+delimiter |;
+create function insid() returns int
+begin
+ insert into t2 (last_id) values (0);
+ return 0;
+end|
+delimiter ;|
+set sql_log_bin=0;
+insert into t2 (id) values(1),(2),(3);
+delete from t2;
+set sql_log_bin=1;
+#inside SELECT, then inside INSERT
+select insid();
+set sql_log_bin=0;
+insert into t2 (id) values(5),(6),(7);
+delete from t2 where id>=5;
+set sql_log_bin=1;
+insert into t1 select insid();
+select * from t1;
+select * from t2;
+
sync_slave_with_master;
+select * from t1;
+select * from t2;
+
+connection master;
+drop table t1, t2;
+drop function insid;
+
+sync_slave_with_master;
+
diff --git a/mysql-test/extra/rpl_tests/rpl_insert_id_pk.test b/mysql-test/extra/rpl_tests/rpl_insert_id_pk.test
index a36c402b630..6feafc5a9a7 100644
--- a/mysql-test/extra/rpl_tests/rpl_insert_id_pk.test
+++ b/mysql-test/extra/rpl_tests/rpl_insert_id_pk.test
@@ -83,5 +83,6 @@ SET FOREIGN_KEY_CHECKS=0;
--error 1022, 1062
INSERT INTO t1 VALUES (1),(1);
sync_slave_with_master;
-
+connection master;
+drop table t1;
# End of 4.1 tests
diff --git a/mysql-test/extra/rpl_tests/rpl_loaddata.test b/mysql-test/extra/rpl_tests/rpl_loaddata.test
index 5d7c69bd959..08e89c20973 100644
--- a/mysql-test/extra/rpl_tests/rpl_loaddata.test
+++ b/mysql-test/extra/rpl_tests/rpl_loaddata.test
@@ -20,8 +20,11 @@ connection slave;
reset master;
connection master;
+select last_insert_id();
create table t1(a int not null auto_increment, b int, primary key(a) );
load data infile '../std_data_ln/rpl_loaddata.dat' into table t1;
+# verify that LAST_INSERT_ID() is set by LOAD DATA INFILE
+select last_insert_id();
create temporary table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60));
load data infile '../std_data_ln/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines;
@@ -152,5 +155,5 @@ wait_for_slave_to_stop;
drop table t2;
connection master;
drop table t2;
-
+drop table t1;
# End of 4.1 tests
diff --git a/mysql-test/extra/rpl_tests/rpl_multi_update3.test b/mysql-test/extra/rpl_tests/rpl_multi_update3.test
index 90cc81cfd58..98fb085f0a8 100644
--- a/mysql-test/extra/rpl_tests/rpl_multi_update3.test
+++ b/mysql-test/extra/rpl_tests/rpl_multi_update3.test
@@ -217,4 +217,6 @@ select "-- SLAVE AFTER JOIN --" as "";
select * from t1;
select * from t2;
+connection master;
+DROP TABLE t1, t2;
# End of 4.1 tests
diff --git a/mysql-test/extra/rpl_tests/rpl_row_sp006.test b/mysql-test/extra/rpl_tests/rpl_row_sp006.test
index f40b3cbb078..25073e54991 100644
--- a/mysql-test/extra/rpl_tests/rpl_row_sp006.test
+++ b/mysql-test/extra/rpl_tests/rpl_row_sp006.test
@@ -76,6 +76,7 @@ DROP PROCEDURE IF EXISTS mysqltest1.p1;
DROP PROCEDURE IF EXISTS mysqltest1.p2;
DROP TABLE IF EXISTS mysqltest1.t1;
DROP TABLE IF EXISTS mysqltest1.t2;
+DROP DATABASE mysqltest1;
# Lets compare. Note: If they match test will pass, if they do not match
# the test will show that the diff statement failed and not reject file
diff --git a/mysql-test/include/check-testcase.test b/mysql-test/include/check-testcase.test
index 30cb7391f30..5bdbf6cd072 100644
--- a/mysql-test/include/check-testcase.test
+++ b/mysql-test/include/check-testcase.test
@@ -21,13 +21,13 @@ show databases;
#
# Dump the "test" database, all it's tables and their data
#
---exec $MYSQL_DUMP --skip-comments test
+--exec $MYSQL_DUMP --skip-comments --skip-lock-tables test
#
# Dump the "mysql" database and it's tables
# Select data separately to add "order by"
#
---exec $MYSQL_DUMP --skip-comments --no-data mysql
+--exec $MYSQL_DUMP --skip-comments --skip-lock-tables --no-data mysql
use mysql;
select * from columns_priv;
select * from db order by host, db, user;
diff --git a/mysql-test/include/ndb_default_cluster.inc b/mysql-test/include/ndb_default_cluster.inc
index 2f900b6a0b4..de7eda3c596 100644
--- a/mysql-test/include/ndb_default_cluster.inc
+++ b/mysql-test/include/ndb_default_cluster.inc
@@ -1,4 +1,4 @@
-- require r/ndb_default_cluster.require
disable_query_log;
-show status like "Ndb_connected_host";
+show status like "Ndb_config_from_host";
enable_query_log;
diff --git a/mysql-test/lib/mtr_cases.pl b/mysql-test/lib/mtr_cases.pl
index 448ca90d48d..b0ba27e6736 100644
--- a/mysql-test/lib/mtr_cases.pl
+++ b/mysql-test/lib/mtr_cases.pl
@@ -5,11 +5,14 @@
# same name.
use File::Basename;
+use IO::File();
use strict;
sub collect_test_cases ($);
sub collect_one_test_case ($$$$$$$);
+sub mtr_options_from_test_file($$);
+
##############################################################################
#
# Collect information about test cases we are to run
@@ -37,6 +40,23 @@ sub collect_test_cases ($) {
opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!");
+ # ----------------------------------------------------------------------
+ # Disable some tests listed in disabled.def
+ # ----------------------------------------------------------------------
+ my %disabled;
+ if ( open(DISABLED, "$testdir/disabled.def" ) )
+ {
+ while ( <DISABLED> )
+ {
+ chomp;
+ if ( /^\s*(\S+)\s*:\s*(.*?)\s*$/ )
+ {
+ $disabled{$1}= $2;
+ }
+ }
+ close DISABLED;
+ }
+
if ( @::opt_cases )
{
foreach my $tname ( @::opt_cases ) { # Run in specified order, no sort
@@ -100,30 +120,13 @@ sub collect_test_cases ($) {
}
}
- collect_one_test_case($testdir,$resdir,$tname,$elem,$cases,{},
+ collect_one_test_case($testdir,$resdir,$tname,$elem,$cases,\%disabled,
$component_id);
}
closedir TESTDIR;
}
else
{
- # ----------------------------------------------------------------------
- # Disable some tests listed in disabled.def
- # ----------------------------------------------------------------------
- my %disabled;
- if ( ! $::opt_ignore_disabled_def and open(DISABLED, "$testdir/disabled.def" ) )
- {
- while ( <DISABLED> )
- {
- chomp;
- if ( /^\s*(\S+)\s*:\s*(.*?)\s*$/ )
- {
- $disabled{$1}= $2;
- }
- }
- close DISABLED;
- }
-
foreach my $elem ( sort readdir(TESTDIR) ) {
my $component_id= undef;
my $tname= undef;
@@ -243,29 +246,25 @@ sub collect_one_test_case($$$$$$$) {
if ( $::opt_skip_rpl )
{
$tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "No replication tests(--skip-rpl)";
return;
}
$tinfo->{'slave_num'}= 1; # Default, use one slave
- # FIXME currently we always restart slaves
- $tinfo->{'slave_restart'}= 1;
-
if ( $tname eq 'rpl_failsafe' or $tname eq 'rpl_chain_temp_table' )
{
-# $tinfo->{'slave_num'}= 3; # Not 3 ? Check old code, strange
+ # $tinfo->{'slave_num'}= 3; # Not 3 ? Check old code, strange
}
}
if ( defined mtr_match_prefix($tname,"federated") )
{
- $tinfo->{'slave_num'}= 1; # Default, use one slave
-
- # FIXME currently we always restart slaves
- $tinfo->{'slave_restart'}= 1;
+    # Default, federated uses the first slave as its federated database
+ $tinfo->{'slave_num'}= 1;
}
- if ( $::opt_with_ndbcluster_all or defined mtr_match_substring($tname,"ndb") )
+ if ( $::opt_with_ndbcluster or defined mtr_match_substring($tname,"ndb") )
{
# This is an ndb test or all tests should be run with ndb cluster started
$tinfo->{'ndb_test'}= 1;
@@ -273,12 +272,14 @@ sub collect_one_test_case($$$$$$$) {
{
# All ndb test's should be skipped
$tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "No ndbcluster test(--skip-ndbcluster)";
return;
}
- if ( ! $::opt_with_ndbcluster )
+ if ( ! $::opt_ndbcluster_supported )
{
# Ndb is not supported, skip them
$tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "No ndbcluster support";
return;
}
}
@@ -287,9 +288,10 @@ sub collect_one_test_case($$$$$$$) {
# This is not a ndb test
$tinfo->{'ndb_test'}= 0;
if ( $::opt_with_ndbcluster_only )
- {
+ {
# Only the ndb test should be run, all other should be skipped
$tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "Only ndbcluster tests(--with-ndbcluster-only)";
return;
}
}
@@ -383,6 +385,7 @@ sub collect_one_test_case($$$$$$$) {
if ( $::glob_win32_perl )
{
$tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "No tests with sh scripts on Windows";
}
else
{
@@ -396,6 +399,7 @@ sub collect_one_test_case($$$$$$$) {
if ( $::glob_win32_perl )
{
$tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "No tests with sh scripts on Windows";
}
else
{
@@ -414,45 +418,94 @@ sub collect_one_test_case($$$$$$$) {
}
# FIXME why this late?
+ my $marked_as_disabled= 0;
if ( $disabled->{$tname} )
{
- $tinfo->{'skip'}= 1;
- $tinfo->{'disable'}= 1; # Sub type of 'skip'
- $tinfo->{'comment'}= $disabled->{$tname} if $disabled->{$tname};
+ $marked_as_disabled= 1;
+ $tinfo->{'comment'}= $disabled->{$tname};
}
if ( -f $disabled_file )
{
- $tinfo->{'skip'}= 1;
- $tinfo->{'disable'}= 1; # Sub type of 'skip'
+ $marked_as_disabled= 1;
$tinfo->{'comment'}= mtr_fromfile($disabled_file);
}
+ # If test was marked as disabled, either opt_enable_disabled is off and then
+ # we skip this test, or it is on and then we run this test but warn
+
+ if ( $marked_as_disabled )
+ {
+ if ( $::opt_enable_disabled )
+ {
+ $tinfo->{'dont_skip_though_disabled'}= 1;
+ }
+ else
+ {
+ $tinfo->{'skip'}= 1;
+ $tinfo->{'disable'}= 1; # Sub type of 'skip'
+ }
+ }
+
if ( $component_id eq 'im' )
{
if ( $::glob_use_embedded_server )
{
$tinfo->{'skip'}= 1;
-
- mtr_report(
- "Instance Manager tests are not available in embedded mode. " .
- "Test case '$tname' is skipped.");
+ $tinfo->{'comment'}= "No IM with embedded server";
}
elsif ( $::opt_ps_protocol )
{
$tinfo->{'skip'}= 1;
-
- mtr_report(
- "Instance Manager tests are not run with --ps-protocol. " .
- "Test case '$tname' is skipped.");
+ $tinfo->{'comment'}= "No IM with --ps-protocol";
}
elsif ( $::opt_skip_im )
{
$tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "No IM tests(--skip-im)";
+ }
+ }
+ else
+ {
+ mtr_options_from_test_file($tinfo,"$testdir/${tname}.test");
+
+ if ( ! $tinfo->{'innodb_test'} )
+ {
+ # mtr_verbose("Adding '--skip-innodb' to $tinfo->{'name'}");
+ # FIXME activate the --skip-innodb only when running with
+ # selected test cases
+ # push(@{$tinfo->{'master_opt'}}, "--skip-innodb");
+ }
+
+ if ( $tinfo->{'big_test'} and ! $::opt_big_test )
+ {
+ $tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "Test need 'big-test' option";
+ }
- mtr_report(
- "Instance Manager executable is unavailable." .
- "Test case '$tname' is skipped.");
+ if ( $tinfo->{'ndb_extra'} and ! $::opt_ndb_extra_test )
+ {
+ $tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "Test need 'ndb_extra' option";
+ }
+
+ if ( $tinfo->{'require_manager'} )
+ {
+ $tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "Test need the _old_ manager(to be removed)";
+ }
+
+ if ( defined $tinfo->{'binlog_format'} and
+ ! ( $tinfo->{'binlog_format'} eq $::used_binlog_format ) )
+ {
+ $tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "Not running with binlog format '$tinfo->{'binlog_format'}'";
+ }
+
+ if ( $tinfo->{'need_debug'} && ! $::debug_compiled_binaries )
+ {
+ $tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "Test need debug binaries";
}
}
@@ -462,8 +515,59 @@ sub collect_one_test_case($$$$$$$) {
( $tinfo->{'master_restart'} or $tinfo->{'slave_restart'} ) )
{
$tinfo->{'skip'}= 1;
+ $tinfo->{'comment'}= "Can't restart a running server";
}
+
}
+# List of tags in the .test files that if found should set
+# the specified value in "tinfo"
+our @tags=
+(
+ ["include/have_innodb.inc", "innodb_test", 1],
+ ["include/have_binlog_format_row.inc", "binlog_format", "row"],
+ ["include/have_binlog_format_statement.inc", "binlog_format", "stmt"],
+ ["include/big_test.inc", "big_test", 1],
+ ["include/have_debug.inc", "need_debug", 1],
+ ["include/have_ndb_extra.inc", "ndb_extra", 1],
+ ["require_manager", "require_manager", 1],
+);
+
+sub mtr_options_from_test_file($$) {
+ my $tinfo= shift;
+ my $file= shift;
+ #mtr_verbose("$file");
+ my $F= IO::File->new($file) or mtr_error("can't open file \"$file\": $!");
+
+ while ( my $line= <$F> )
+ {
+ chomp;
+
+ next if ( $line !~ /^--/ );
+
+ # Match this line against tag in "tags" array
+ foreach my $tag (@tags)
+ {
+ if ( index($line, $tag->[0]) >= 0 )
+ {
+ # Tag matched, assign value to "tinfo"
+ $tinfo->{"$tag->[1]"}= $tag->[2];
+ }
+ }
+
+ # If test sources another file, open it as well
+ if ( $line =~ /^\-\-([[:space:]]*)source(.*)$/ )
+ {
+ my $value= $2;
+ $value =~ s/^\s+//; # Remove leading space
+ $value =~ s/[[:space:]]+$//; # Remove ending space
+
+ my $sourced_file= "$::glob_mysql_test_dir/$value";
+ mtr_options_from_test_file($tinfo, $sourced_file);
+ }
+
+ }
+}
+
1;
diff --git a/mysql-test/lib/mtr_process.pl b/mysql-test/lib/mtr_process.pl
index 1b2ce17a689..ca1ae13978d 100644
--- a/mysql-test/lib/mtr_process.pl
+++ b/mysql-test/lib/mtr_process.pl
@@ -14,13 +14,18 @@ use POSIX 'WNOHANG';
sub mtr_run ($$$$$$;$);
sub mtr_spawn ($$$$$$;$);
-sub mtr_stop_mysqld_servers ($);
+sub mtr_check_stop_servers ($);
sub mtr_kill_leftovers ();
+sub mtr_wait_blocking ($);
sub mtr_record_dead_children ();
+sub mtr_ndbmgm_start($$);
+sub mtr_mysqladmin_start($$$);
sub mtr_exit ($);
sub sleep_until_file_created ($$$);
sub mtr_kill_processes ($);
-sub mtr_kill_process ($$$$);
+sub mtr_ping_with_timeout($);
+sub mtr_ping_port ($);
+sub mtr_kill_process ($$$);
# static in C
sub spawn_impl ($$$$$$$$);
@@ -32,7 +37,6 @@ sub spawn_impl ($$$$$$$$);
##############################################################################
# This function try to mimic the C version used in "netware/mysql_test_run.c"
-# FIXME learn it to handle append mode as well, a "new" flag or a "append"
sub mtr_run ($$$$$$;$) {
my $path= shift;
@@ -268,40 +272,17 @@ sub spawn_parent_impl {
last;
}
- # If one of the mysqld processes died, we want to
+ # If one of the processes died, we want to
# mark this, and kill the mysqltest process.
- foreach my $idx (0..1)
- {
- if ( $::master->[$idx]->{'pid'} eq $ret_pid )
- {
- mtr_debug("child $ret_pid was master[$idx], " .
- "exit during mysqltest run");
- $::master->[$idx]->{'pid'}= 0;
- last;
- }
- }
-
- foreach my $idx (0..2)
- {
- if ( $::slave->[$idx]->{'pid'} eq $ret_pid )
- {
- mtr_debug("child $ret_pid was slave[$idx], " .
- "exit during mysqltest run");
- $::slave->[$idx]->{'pid'}= 0;
- last;
- }
- }
-
- mtr_debug("waitpid() catched exit of unknown child $ret_pid, " .
- "exit during mysqltest run");
+ mark_process_dead($ret_pid);
}
if ( $ret_pid != $pid )
{
# We terminated the waiting because a "mysqld" process died.
# Kill the mysqltest process.
-
+ mtr_verbose("Kill mysqltest because another process died");
kill(9,$pid);
$ret_pid= waitpid($pid,0);
@@ -347,49 +328,72 @@ sub mtr_process_exit_status {
#
##############################################################################
-# We just "ping" on the ports, and if we can't do a socket connect
-# we assume the server is dead. So we don't *really* know a server
-# is dead, we just hope that it after letting the listen port go,
-# it is dead enough for us to start a new server.
+# Kill all processes (mysqld, ndbd, ndb_mgmd and im) that would conflict with
+# this run
+# Make sure to remove the PID file, if any.
+# Kill the IM manager first, else it will restart the servers
sub mtr_kill_leftovers () {
- # First, kill all masters and slaves that would conflict with
- # this run. Make sure to remove the PID file, if any.
- # FIXME kill IM manager first, else it will restart the servers, how?!
- my @args;
+ my @kill_pids;
+ my %admin_pids;
+ my $pid;
- for ( my $idx; $idx < 2; $idx++ )
+  # Start shutdown of instance_managers, masters and slaves
+ foreach my $srv ($::instance_manager,
+ @{$::instance_manager->{'instances'}},
+ @{$::master},@{$::slave})
{
- push(@args,{
- pid => 0, # We don't know the PID
- pidfile => $::instance_manager->{'instances'}->[$idx]->{'path_pid'},
- sockfile => $::instance_manager->{'instances'}->[$idx]->{'path_sock'},
- port => $::instance_manager->{'instances'}->[$idx]->{'port'},
- });
- }
+ $pid= mtr_mysqladmin_start($srv, "shutdown", 70);
- for ( my $idx; $idx < 2; $idx++ )
- {
- push(@args,{
- pid => 0, # We don't know the PID
- pidfile => $::master->[$idx]->{'path_mypid'},
- sockfile => $::master->[$idx]->{'path_mysock'},
- port => $::master->[$idx]->{'path_myport'},
- });
+ # Save the pid of the mysqladmin process
+ $admin_pids{$pid}= 1;
+
+ push(@kill_pids,{
+ pid => $srv->{'pid'},
+ pidfile => $srv->{'path_pid'},
+ sockfile => $srv->{'path_sock'},
+ port => $srv->{'port'},
+ });
+ $srv->{'pid'}= 0; # Assume we are done with it
}
- for ( my $idx; $idx < 3; $idx++ )
+ # Start shutdown of clusters
+ foreach my $cluster (@{$::clusters})
{
- push(@args,{
- pid => 0, # We don't know the PID
- pidfile => $::slave->[$idx]->{'path_mypid'},
- sockfile => $::slave->[$idx]->{'path_mysock'},
- port => $::slave->[$idx]->{'path_myport'},
- });
+ $pid= mtr_ndbmgm_start($cluster, "shutdown");
+
+ # Save the pid of the ndb_mgm process
+ $admin_pids{$pid}= 1;
+
+ push(@kill_pids,{
+ pid => $cluster->{'pid'},
+ pidfile => $cluster->{'path_pid'}
+ });
+
+ $cluster->{'pid'}= 0; # Assume we are done with it
+
+
+ foreach my $ndbd (@{$cluster->{'ndbds'}})
+ {
+ push(@kill_pids,{
+ pid => $ndbd->{'pid'},
+ pidfile => $ndbd->{'path_pid'},
+ });
+ $ndbd->{'pid'}= 0; # Assume we are done with it
+ }
+
}
- mtr_mysqladmin_shutdown(\@args, 20);
+ # Wait for all the admin processes to complete
+ mtr_wait_blocking(\%admin_pids);
+
+ # If we trusted "mysqladmin --shutdown_timeout= ..." we could just
+ # terminate now, but we don't (FIXME should be debugged).
+ # So we try again to ping and at least wait the same amount of time
+ # mysqladmin would for all to die.
+
+ mtr_ping_with_timeout(\@kill_pids);
# We now have tried to terminate nice. We have waited for the listen
# port to be free, but can't really tell if the mysqld process died
@@ -454,7 +458,7 @@ sub mtr_kill_leftovers () {
do
{
kill(9, @pids);
- mtr_debug("Sleep 1 second waiting for processes to die");
+ mtr_report("Sleep 1 second waiting for processes to die");
sleep(1) # Wait one second
} while ( $retries-- and kill(0, @pids) );
@@ -466,53 +470,61 @@ sub mtr_kill_leftovers () {
}
}
- # We may have failed everything, bug we now check again if we have
+ # We may have failed everything, but we now check again if we have
# the listen ports free to use, and if they are free, just go for it.
- foreach my $srv ( @args )
+ foreach my $srv ( @kill_pids )
{
- if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) )
+ if ( defined $srv->{'port'} and mtr_ping_port($srv->{'port'}) )
{
- mtr_warning("can't kill old mysqld holding port $srv->{'port'}");
+ mtr_warning("can't kill old process holding port $srv->{'port'}");
}
}
}
-##############################################################################
-#
-# Shut down mysqld servers we have started from this run of this script
-#
-##############################################################################
-
-# To speed things we kill servers in parallel. The argument is a list
-# of 'ports', 'pids', 'pidfiles' and 'socketfiles'.
+# Check that all processes in list are killed
+# The argument is a list of 'ports', 'pids', 'pidfiles' and 'socketfiles'
+# for which shutdown has been started. Make sure they all get killed
+# in one way or the other.
+#
# FIXME On Cygwin, and maybe some other platforms, $srv->{'pid'} and
-# $srv->{'pidfile'} will not be the same PID. We need to try to kill
+# the pid in $srv->{'pidfile'} will not be the same PID. We need to try to kill
# both I think.
-sub mtr_stop_mysqld_servers ($) {
+sub mtr_check_stop_servers ($) {
my $spec= shift;
- # ----------------------------------------------------------------------
- # First try nice normal shutdown using 'mysqladmin'
- # ----------------------------------------------------------------------
+ # Return if no processes are defined
+ return if ! @$spec;
- # Shutdown time must be high as slave may be in reconnect
- mtr_mysqladmin_shutdown($spec, 70);
+ #mtr_report("mtr_check_stop_servers");
+
+ mtr_ping_with_timeout(\@$spec);
# ----------------------------------------------------------------------
# We loop with waitpid() nonblocking to see how many of the ones we
- # are to kill, actually got killed by mtr_mysqladmin_shutdown().
- # Note that we don't rely on this, the mysqld server might have stop
+ # are to kill, actually got killed by mysqladmin or ndb_mgm
+ #
+ # Note that we don't rely on this, the mysqld server might have stopped
# listening to the port, but still be alive. But it is a start.
# ----------------------------------------------------------------------
foreach my $srv ( @$spec )
{
- if ( $srv->{'pid'} and (waitpid($srv->{'pid'},&WNOHANG) == $srv->{'pid'}) )
+ my $ret_pid;
+ if ( $srv->{'pid'} )
{
- $srv->{'pid'}= 0;
+ $ret_pid= waitpid($srv->{'pid'},&WNOHANG);
+ if ($ret_pid == $srv->{'pid'})
+ {
+ mtr_verbose("Caught exit of process $ret_pid");
+ $srv->{'pid'}= 0;
+ }
+ else
+ {
+ # mtr_warning("caught exit of unknown child $ret_pid");
+ }
}
}
@@ -546,13 +558,12 @@ sub mtr_stop_mysqld_servers ($) {
}
# ----------------------------------------------------------------------
- # If the processes where started from this script, and we had no PIDS
+  # If all the processes in the list have already been killed,
# then we don't have to do anything.
# ----------------------------------------------------------------------
if ( ! keys %mysqld_pids )
{
- # cluck "This is how we got here!";
return;
}
@@ -601,107 +612,127 @@ sub mtr_stop_mysqld_servers ($) {
foreach my $file ($srv->{'pidfile'}, $srv->{'sockfile'})
{
# Know it is dead so should be no race, careful anyway
- if ( -f $file and ! unlink($file) and -f $file )
+ if ( defined $file and -f $file and ! unlink($file) and -f $file )
{
$errors++;
mtr_warning("couldn't delete $file");
}
}
+ $srv->{'pid'}= 0;
}
}
}
if ( $errors )
{
- # We are in trouble, just die....
- mtr_error("we could not kill or clean up all processes");
+    # There were errors killing processes,
+ # do one last attempt to ping the servers
+ # and if they can't be pinged, assume they are dead
+ if ( ! mtr_ping_with_timeout( \@$spec ) )
+ {
+ mtr_error("we could not kill or clean up all processes");
+ }
+ else
+ {
+      mtr_verbose("All ports were free, continuing");
+ }
}
}
# FIXME We just assume they are all dead, for Cygwin we are not
# really sure
-
+
}
+# Wait for all the process in the list to terminate
+sub mtr_wait_blocking($) {
+ my $admin_pids= shift;
-##############################################################################
-#
-# Shut down mysqld servers using "mysqladmin ... shutdown".
-# To speed this up, we start them in parallel and use waitpid() to
-# catch their termination. Note that this doesn't say the servers
-# are terminated, just that 'mysqladmin' is terminated.
-#
-# Note that mysqladmin will ask the server about what PID file it uses,
-# and mysqladmin will wait for it to be removed before it terminates
-# (unless passes timeout).
-#
-# This function will take at most about 20 seconds, and we still are not
-# sure we killed them all. If none is responding to ping, we return 1,
-# else we return 0.
-#
-##############################################################################
-sub mtr_mysqladmin_shutdown {
- my $spec= shift;
- my $adm_shutdown_tmo= shift;
+ # Return if no processes defined
+ return if ! %$admin_pids;
- my %mysql_admin_pids;
+ mtr_verbose("mtr_wait_blocking");
- # Start one "mysqladmin shutdown" for each server
- foreach my $srv ( @$spec )
+ # Wait for all the started processes to exit
+ # As mysqladmin is such a simple program, we trust it to terminate itself.
+ # I.e. we wait blocking, and wait for them all before we go on.
+ foreach my $pid (keys %{$admin_pids})
{
- my $args;
+ my $ret_pid= waitpid($pid,0);
- mtr_init_args(\$args);
+ }
+}
- mtr_add_arg($args, "--no-defaults");
- mtr_add_arg($args, "--user=%s", $::opt_user);
- mtr_add_arg($args, "--password=");
- mtr_add_arg($args, "--silent");
- if ( -e $srv->{'sockfile'} )
- {
- mtr_add_arg($args, "--socket=%s", $srv->{'sockfile'});
- }
- if ( $srv->{'port'} )
- {
- mtr_add_arg($args, "--port=%s", $srv->{'port'});
- }
- if ( $srv->{'port'} and ! -e $srv->{'sockfile'} )
- {
- mtr_add_arg($args, "--protocol=tcp"); # Needed if no --socket
- }
- mtr_add_arg($args, "--connect_timeout=5");
- # Shutdown time must be high as slave may be in reconnect
- mtr_add_arg($args, "--shutdown_timeout=$adm_shutdown_tmo");
- mtr_add_arg($args, "shutdown");
- my $path_mysqladmin_log= "$::opt_vardir/log/mysqladmin.log";
- # Start mysqladmin in paralell and wait for termination later
- my $pid= mtr_spawn($::exe_mysqladmin, $args,
- "", $path_mysqladmin_log, $path_mysqladmin_log, "",
- { append_log_file => 1 });
- # Save the pid of the mysqladmin process
- $mysql_admin_pids{$pid}= 1;
+# Start "mysqladmin shutdown" for a specific mysqld
+sub mtr_mysqladmin_start($$$) {
+ my $srv= shift;
+ my $command= shift;
+ my $adm_shutdown_tmo= shift;
- # We don't wait for termination of mysqladmin
- }
+ my $args;
+ mtr_init_args(\$args);
- # Wait for all the started mysqladmin to exit
- # As mysqladmin is such a simple program, we trust it to terminate.
- # I.e. we wait blocking, and wait wait for them all before we go on.
- foreach my $pid (keys %mysql_admin_pids)
+ mtr_add_arg($args, "--no-defaults");
+ mtr_add_arg($args, "--user=%s", $::opt_user);
+ mtr_add_arg($args, "--password=");
+ mtr_add_arg($args, "--silent");
+ if ( -e $srv->{'path_sock'} )
{
- my $ret_pid= waitpid($pid,0);
-
- # If this was any of the mysqladmin's we waited for, delete its
- # pid from list
- delete $mysql_admin_pids{$ret_pid} if exists $mysql_admin_pids{$ret_pid};
+ mtr_add_arg($args, "--socket=%s", $srv->{'path_sock'});
+ }
+ if ( $srv->{'port'} )
+ {
+ mtr_add_arg($args, "--port=%s", $srv->{'port'});
+ }
+ if ( $srv->{'port'} and ! -e $srv->{'path_sock'} )
+ {
+ mtr_add_arg($args, "--protocol=tcp"); # Needed if no --socket
}
+ mtr_add_arg($args, "--connect_timeout=5");
- # If we trusted "mysqladmin --shutdown_timeout= ..." we could just
- # terminate now, but we don't (FIXME should be debugged).
- # So we try again to ping and at least wait the same amount of time
- # mysqladmin would for all to die.
+ # Shutdown time must be high as slave may be in reconnect
+ mtr_add_arg($args, "--shutdown_timeout=$adm_shutdown_tmo");
+ mtr_add_arg($args, "$command");
+ my $path_mysqladmin_log= "$::opt_vardir/log/mysqladmin.log";
+ my $pid= mtr_spawn($::exe_mysqladmin, $args,
+ "", $path_mysqladmin_log, $path_mysqladmin_log, "",
+ { append_log_file => 1 });
+ mtr_verbose("mtr_mysqladmin_start, pid: $pid");
+ return $pid;
+
+}
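For illustration, a sketch of how such a helper is typically driven (the @servers list is hypothetical; the blocking wait matches mtr_wait_blocking above):

    # Sketch only: start one "mysqladmin shutdown" per server, then wait
    # for all of the admin processes to finish.
    my %admin_pids;
    foreach my $srv ( @servers )
    {
      my $pid= mtr_mysqladmin_start($srv, "shutdown", 70);
      $admin_pids{$pid}= 1;
    }
    waitpid($_, 0) for keys %admin_pids;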
+
+# Start "ndb_mgm shutdown" for a specific cluster; it will
+# shut down all data nodes and leave the ndb_mgmd running
+sub mtr_ndbmgm_start($$) {
+ my $cluster= shift;
+ my $command= shift;
+
+ my $args;
- my $timeout= 20; # 20 seconds max
+ mtr_init_args(\$args);
+
+ mtr_add_arg($args, "--no-defaults");
+ mtr_add_arg($args, "--core");
+ mtr_add_arg($args, "--try-reconnect=1");
+ mtr_add_arg($args, "--ndb_connectstring=%s", $cluster->{'connect_string'});
+ mtr_add_arg($args, "-e");
+ mtr_add_arg($args, "$command");
+
+ my $pid= mtr_spawn($::exe_ndb_mgm, $args,
+ "", "/dev/null", "/dev/null", "",
+ {});
+ mtr_verbose("mtr_ndbmgm_start, pid: $pid");
+ return $pid;
+
+}
+
+
+# Ping all servers in the list, exit when none of them answers
+# or when the timeout has passed
+sub mtr_ping_with_timeout($) {
+ my $spec= shift;
+ my $timeout= 200; # 20 seconds max
my $res= 1; # If we just fall through, we are done
# in the sense that the servers don't
# listen to their ports any longer
@@ -711,22 +742,75 @@ sub mtr_mysqladmin_shutdown {
foreach my $srv ( @$spec )
{
$res= 1; # We are optimistic
- if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) )
+ if ( $srv->{'pid'} and defined $srv->{'port'} )
{
- mtr_debug("Sleep 1 second waiting for processes to stop using port");
- sleep(1); # One second
- $res= 0;
- next TIME;
+ if ( mtr_ping_port($srv->{'port'}) )
+ {
+ mtr_verbose("waiting for process $srv->{'pid'} to stop ".
+ "using port $srv->{'port'}");
+
+          # Millisecond sleep emulated with select
+ select(undef, undef, undef, (0.1));
+ $res= 0;
+ next TIME;
+ }
+ else
+ {
+ # Process was not using port
+ }
}
}
last; # If we got here, we are done
}
- $timeout or mtr_debug("At least one server is still listening to its port");
+ $timeout or mtr_report("At least one server is still listening to its port");
return $res;
}
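The sub-second sleep used in the loop above is the classic four-argument select idiom; as a stand-alone illustration:

    select(undef, undef, undef, 0.1);   # sleep roughly 100 ms without Time::HiRes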
+
+#
+# Loop through our list of processes and look for an entry
+# with the provided pid
+# Set the pid of that process to 0 if found
+#
+sub mark_process_dead($)
+{
+ my $ret_pid= shift;
+
+ foreach my $mysqld (@{$::master}, @{$::slave})
+ {
+ if ( $mysqld->{'pid'} eq $ret_pid )
+ {
+ mtr_verbose("$mysqld->{'type'} $mysqld->{'idx'} exited, pid: $ret_pid");
+ $mysqld->{'pid'}= 0;
+ return;
+ }
+ }
+
+ foreach my $cluster (@{$::clusters})
+ {
+ if ( $cluster->{'pid'} eq $ret_pid )
+ {
+ mtr_verbose("$cluster->{'name'} cluster ndb_mgmd exited, pid: $ret_pid");
+ $cluster->{'pid'}= 0;
+ return;
+ }
+
+ foreach my $ndbd (@{$cluster->{'ndbds'}})
+ {
+ if ( $ndbd->{'pid'} eq $ret_pid )
+ {
+ mtr_verbose("$cluster->{'name'} cluster ndbd exited, pid: $ret_pid");
+ $ndbd->{'pid'}= 0;
+ return;
+ }
+ }
+ }
+ mtr_warning("mark_process_dead couldn't find an entry for pid: $ret_pid");
+
+}
+
##############################################################################
#
# The operating system will keep information about dead children,
@@ -743,25 +827,8 @@ sub mtr_record_dead_children () {
   # -1 or 0 means there are no more processes to wait for
while ( ($ret_pid= waitpid(-1,&WNOHANG)) != 0 and $ret_pid != -1)
{
- mtr_debug("waitpid() catched exit of child $ret_pid");
- foreach my $idx (0..1)
- {
- if ( $::master->[$idx]->{'pid'} eq $ret_pid )
- {
- mtr_debug("child $ret_pid was master[$idx]");
- $::master->[$idx]->{'pid'}= 0;
- }
- }
-
- foreach my $idx (0..2)
- {
- if ( $::slave->[$idx]->{'pid'} eq $ret_pid )
- {
- mtr_debug("child $ret_pid was slave[$idx]");
- $::slave->[$idx]->{'pid'}= 0;
- last;
- }
- }
+ mtr_warning("mtr_record_dead_children: $ret_pid");
+ mark_process_dead($ret_pid);
}
}
@@ -777,7 +844,8 @@ sub start_reap_all {
my $pid;
while(($pid= waitpid(-1, &WNOHANG)) != 0 and $pid != -1)
{
- print "start_reap_all: pid: $pid.\n";
+ mtr_warning("start_reap_all pid: $pid");
+ mark_process_dead($pid);
};
}
@@ -785,9 +853,12 @@ sub stop_reap_all {
$SIG{CHLD}= 'DEFAULT';
}
-sub mtr_ping_mysqld_server () {
+
+sub mtr_ping_port ($) {
my $port= shift;
+ mtr_verbose("mtr_ping_port: $port");
+
my $remote= "localhost";
my $iaddr= inet_aton($remote);
if ( ! $iaddr )
@@ -803,10 +874,12 @@ sub mtr_ping_mysqld_server () {
if ( connect(SOCK, $paddr) )
{
close(SOCK); # FIXME check error?
+ mtr_verbose("USED");
return 1;
}
else
{
+ mtr_verbose("FREE");
return 0;
}
}
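For illustration, the same connect-based port check as a self-contained helper (hypothetical name, lexical filehandle instead of the bareword SOCK):

    use Socket;

    sub port_in_use {
      my ($port)= @_;
      my $iaddr= inet_aton("localhost") or return 0;
      my $paddr= sockaddr_in($port, $iaddr);
      socket(my $sock, PF_INET, SOCK_STREAM, getprotobyname('tcp')) or return 0;
      if ( connect($sock, $paddr) )
      {
        close($sock);
        return 1;                 # something answered -> port is "USED"
      }
      return 0;                   # connect failed -> port is "FREE"
    }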
@@ -833,18 +906,18 @@ sub sleep_until_file_created ($$$) {
return $pid;
}
- # Check if it died after the fork() was successful
+ # Check if it died after the fork() was successful
if ( $pid != 0 && waitpid($pid,&WNOHANG) == $pid )
{
+ mtr_warning("Process $pid died");
return 0;
}
- mtr_debug("Sleep $sleeptime milliseconds waiting for ".
- "creation of $pidfile");
+ mtr_debug("Sleep $sleeptime milliseconds waiting for $pidfile");
# Print extra message every 60 seconds
my $seconds= ($loop * $sleeptime) / 1000;
- if ( $seconds > 1 and $seconds % 60 == 0 )
+ if ( $seconds > 1 and int($seconds) % 60 == 0 )
{
my $left= $timeout - $seconds;
mtr_warning("Waited $seconds seconds for $pidfile to be created, " .
@@ -862,46 +935,38 @@ sub sleep_until_file_created ($$$) {
sub mtr_kill_processes ($) {
my $pids = shift;
- foreach my $sig (15, 9)
+ mtr_verbose("mtr_kill_processes " . join(" ", @$pids));
+
+ foreach my $pid (@$pids)
{
- my $retries= 10;
- while (1)
+ foreach my $sig (15, 9)
{
- kill($sig, @{$pids});
- last unless kill (0, @{$pids}) and $retries--;
-
- mtr_debug("Sleep 2 second waiting for processes to die");
- sleep(2);
+ last if mtr_kill_process($pid, $sig, 10);
}
}
}
-sub mtr_kill_process ($$$$) {
+sub mtr_kill_process ($$$) {
my $pid= shift;
my $signal= shift;
- my $total_retries= shift;
- my $timeout= shift;
+ my $timeout= shift; # Seconds to wait for process
+  my $max_loop= $timeout*10; # Sleep 0.1 second between each kill attempt
- for (my $cur_attempt= 1; $cur_attempt <= $total_retries; ++$cur_attempt)
+ for (my $cur_attempt= 1; $cur_attempt <= $max_loop; ++$cur_attempt)
{
mtr_debug("Sending $signal to $pid...");
kill($signal, $pid);
- unless (kill (0, $pid))
- {
- mtr_debug("Process $pid died.");
- return;
- }
+ last unless kill (0, $pid) and $max_loop--;
- mtr_debug("Sleeping $timeout second(s) waiting for processes to die...");
+ mtr_verbose("Sleep 0.1 second waiting for processes $pid to die");
- sleep($timeout);
+ select(undef, undef, undef, 0.1);
}
- mtr_debug("Process $pid is still alive after $total_retries " .
- "of sending signal $signal.");
+ return $max_loop;
}
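For illustration, the escalate-then-poll pattern above as a minimal stand-alone helper (hypothetical name; SIGTERM first, then SIGKILL):

    sub kill_and_wait {
      my ($pid, $timeout)= @_;
      foreach my $sig (15, 9)                  # TERM, then KILL
      {
        kill($sig, $pid);
        for (my $i= 0; $i < $timeout * 10; $i++)
        {
          return 1 unless kill(0, $pid);       # process is gone
          select(undef, undef, undef, 0.1);    # poll every 0.1 second
        }
      }
      return kill(0, $pid) ? 0 : 1;
    }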
##############################################################################
@@ -913,7 +978,7 @@ sub mtr_kill_process ($$$$) {
# FIXME something is wrong, we sometimes terminate with "Hangup" written
# to tty, and no STDERR output telling us why.
-# FIXME for some readon, setting HUP to 'IGNORE' will cause exit() to
+# FIXME for some reason, setting HUP to 'IGNORE' will cause exit() to
# write out "Hangup", and maybe loose some output. We insert a sleep...
sub mtr_exit ($) {
diff --git a/mysql-test/lib/mtr_report.pl b/mysql-test/lib/mtr_report.pl
index f2da89355f7..6e3796133f2 100644
--- a/mysql-test/lib/mtr_report.pl
+++ b/mysql-test/lib/mtr_report.pl
@@ -10,6 +10,7 @@ sub mtr_report_test_name($);
sub mtr_report_test_passed($);
sub mtr_report_test_failed($);
sub mtr_report_test_skipped($);
+sub mtr_report_test_not_skipped_though_disabled($);
sub mtr_show_failed_diff ($);
sub mtr_report_stats ($);
@@ -21,6 +22,7 @@ sub mtr_warning (@);
sub mtr_error (@);
sub mtr_child_error (@);
sub mtr_debug (@);
+sub mtr_verbose (@);
##############################################################################
@@ -96,7 +98,24 @@ sub mtr_report_test_skipped ($) {
}
else
{
- print "[ skipped ]\n";
+ print "[ skipped ] $tinfo->{'comment'}\n";
+ }
+}
+
+sub mtr_report_tests_not_skipped_though_disabled ($) {
+ my $tests= shift;
+
+ if ( $::opt_enable_disabled )
+ {
+ my @disabled_tests= grep {$_->{'dont_skip_though_disabled'}} @$tests;
+ if ( @disabled_tests )
+ {
+      print "\nTest(s) which will be run even though they are marked as disabled:\n";
+ foreach my $tinfo ( sort {$a->{'name'} cmp $b->{'name'}} @disabled_tests )
+ {
+ printf " %-20s : %s\n", $tinfo->{'name'}, $tinfo->{'comment'};
+ }
+ }
}
}
@@ -107,7 +126,7 @@ sub mtr_report_test_passed ($) {
if ( $::opt_timer and -f "$::opt_vardir/log/timer" )
{
$timer= mtr_fromfile("$::opt_vardir/log/timer");
- $::glob_tot_real_time += $timer;
+ $::glob_tot_real_time += ($timer/1000);
$timer= sprintf "%12s", $timer;
}
$tinfo->{'result'}= 'MTR_RES_PASSED';
@@ -122,7 +141,7 @@ sub mtr_report_test_failed ($) {
{
print "[ fail ] timeout\n";
}
- elsif ( $tinfo->{'ndb_test'} and !$::flag_ndb_status_ok)
+ elsif ( $tinfo->{'ndb_test'} and $::cluster->[0]->{'installed_ok'} eq "NO")
{
print "[ fail ] ndbcluster start failure\n";
return;
@@ -157,6 +176,7 @@ sub mtr_report_stats ($) {
my $tot_passed= 0;
my $tot_failed= 0;
my $tot_tests= 0;
+ my $tot_restarts= 0;
my $found_problems= 0; # Some warnings are errors...
foreach my $tinfo (@$tests)
@@ -175,6 +195,10 @@ sub mtr_report_stats ($) {
$tot_tests++;
$tot_failed++;
}
+ if ( $tinfo->{'restarted'} )
+ {
+ $tot_restarts++;
+ }
}
# ----------------------------------------------------------------------
@@ -197,6 +221,14 @@ sub mtr_report_stats ($) {
"the documentation at\n",
"http://www.mysql.com/doc/en/MySQL_test_suite.html\n";
}
+ print
+ "The servers were restarted $tot_restarts times\n";
+
+ if ( $::opt_timer )
+ {
+ print
+ "Spent $::glob_tot_real_time seconds actually executing testcases\n"
+ }
# ----------------------------------------------------------------------
# If a debug run, there might be interesting information inside
@@ -335,5 +367,11 @@ sub mtr_debug (@) {
print STDERR "####: ",join(" ", @_),"\n";
}
}
+sub mtr_verbose (@) {
+ if ( $::opt_verbose )
+ {
+ print STDERR "> ",join(" ", @_),"\n";
+ }
+}
1;
diff --git a/mysql-test/lib/mtr_stress.pl b/mysql-test/lib/mtr_stress.pl
index 92bb220461b..a7d4b68b69d 100644
--- a/mysql-test/lib/mtr_stress.pl
+++ b/mysql-test/lib/mtr_stress.pl
@@ -25,10 +25,9 @@ sub run_stress_test ()
mtr_report("Starting stress testing\n");
- if ( ! $::glob_use_embedded_server and ! $::opt_local_master )
+ if ( ! $::glob_use_embedded_server )
{
- $::master->[0]->{'pid'}= mysqld_start('master',0,[],[],0);
- if ( ! $::master->[0]->{'pid'} )
+ if ( ! mysqld_start($::master->[0],[],[]) )
{
mtr_error("Can't start the mysqld server");
}
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index f757c59d90e..daad3bd23f2 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -145,9 +145,6 @@ our $glob_use_running_ndbcluster_slave= 0;
our $glob_use_embedded_server= 0;
our @glob_test_mode;
-our $using_ndbcluster_master= 0;
-our $using_ndbcluster_slave= 0;
-
our $glob_basedir;
# The total result
@@ -171,6 +168,7 @@ our $opt_suite;
our $opt_netware;
our $opt_script_debug= 0; # Script debugging, enable with --script-debug
+our $opt_verbose= 0; # Verbose output, enable with --verbose
# Options FIXME not all....
@@ -187,6 +185,8 @@ our $exe_mysqlimport; # Called from test case
our $exe_mysqlshow; # Called from test case
our $exe_mysql_fix_system_tables;
our $exe_mysqltest;
+our $exe_ndbd;
+our $exe_ndb_mgmd;
our $exe_slave_mysqld;
our $exe_im;
our $exe_my_print_defaults;
@@ -216,6 +216,7 @@ our $opt_extern;
our $opt_fast;
our $opt_force;
our $opt_reorder;
+our $opt_enable_disabled;
our $opt_gcov;
our $opt_gcov_err;
@@ -237,11 +238,9 @@ our $opt_gprof_dir;
our $opt_gprof_master;
our $opt_gprof_slave;
-our $opt_local;
-our $opt_local_master;
-
our $master; # Will be struct in C
our $slave;
+our $clusters;
our $instance_manager;
@@ -250,8 +249,6 @@ our $opt_ndbconnectstring;
our $opt_ndbcluster_port_slave;
our $opt_ndbconnectstring_slave;
-our $opt_old_master;
-
our $opt_record;
our $opt_check_testcases;
@@ -290,9 +287,10 @@ our $opt_user_test;
our $opt_valgrind= 0;
our $opt_valgrind_mysqld= 0;
our $opt_valgrind_mysqltest= 0;
-our $default_valgrind_options= "-v --show-reachable=yes";
+our $default_valgrind_options= "--show-reachable=yes";
our $opt_valgrind_options;
our $opt_valgrind_path;
+our $opt_callgrind;
our $opt_stress= "";
our $opt_stress_suite= "main";
@@ -313,25 +311,25 @@ our $opt_warnings;
our $opt_udiff;
our $opt_skip_ndbcluster= 0;
-our $opt_with_ndbcluster;
our $opt_skip_ndbcluster_slave= 0;
-our $opt_with_ndbcluster_slave;
-our $opt_with_ndbcluster_all= 0;
+our $opt_with_ndbcluster= 0;
our $opt_with_ndbcluster_only= 0;
+our $opt_ndbcluster_supported= 0;
our $opt_ndb_extra_test= 0;
our $opt_skip_master_binlog= 0;
our $opt_skip_slave_binlog= 0;
our $exe_ndb_mgm;
+our $exe_ndb_waiter;
our $path_ndb_tools_dir;
-our $path_ndb_data_dir;
-our $path_ndb_slave_data_dir;
our $file_ndb_testrun_log;
-our $flag_ndb_status_ok= 1;
-our $flag_ndb_slave_status_ok= 1;
our @data_dir_lst;
+our $used_binlog_format;
+our $debug_compiled_binaries;
+our $glob_tot_real_time= 0;
+
######################################################################
#
# Function declarations
@@ -350,27 +348,27 @@ sub check_ssl_support ();
sub check_running_as_root();
sub check_ndbcluster_support ();
sub rm_ndbcluster_tables ($);
-sub ndbcluster_install ();
-sub ndbcluster_start ($);
-sub ndbcluster_stop ();
-sub ndbcluster_install_slave ();
-sub ndbcluster_start_slave ($);
-sub ndbcluster_stop_slave ();
+sub ndbcluster_start_install ($);
+sub ndbcluster_start ($$);
+sub ndbcluster_wait_started ($$);
+sub mysqld_wait_started($);
sub run_benchmarks ($);
sub initialize_servers ();
sub mysql_install_db ();
sub install_db ($$);
sub run_testcase ($);
+sub run_testcase_stop_servers ($);
+sub run_testcase_start_servers ($);
sub report_failure_and_restart ($);
sub do_before_start_master ($$);
sub do_before_start_slave ($$);
-sub mysqld_start ($$$$$);
-sub mysqld_arguments ($$$$$$);
-sub stop_masters_slaves ();
-sub stop_masters ();
-sub stop_slaves ();
+sub ndbd_start ($$$);
+sub ndb_mgmd_start ($);
+sub mysqld_start ($$$);
+sub mysqld_arguments ($$$$$);
+sub stop_all_servers ();
sub im_start ($$);
-sub im_stop ($);
+sub im_stop ($$);
sub run_mysqltest ($);
sub usage ($);
@@ -388,8 +386,9 @@ sub main () {
command_line_setup();
executable_setup();
- check_ndbcluster_support(); # We check whether to actually use it later
+ check_ndbcluster_support();
check_ssl_support();
+ check_debug_support();
environment_setup();
signal_setup();
@@ -427,7 +426,7 @@ sub main () {
$need_im||= $test->{component_id} eq 'im';
$use_slaves||= $test->{slave_num};
}
- $opt_with_ndbcluster= $opt_with_ndbcluster_slave= 0
+ $opt_skip_ndbcluster= $opt_skip_ndbcluster_slave= 1
unless $need_ndbcluster;
$opt_skip_im= 1 unless $need_im;
@@ -555,6 +554,11 @@ sub command_line_setup () {
"($opt_master_myport - $opt_master_myport + 10)");
}
+ # This is needed for test log evaluation in "gen-build-status-page"
+ # in all cases where the calling tool does not log the commands
+ # directly before it executes them, like "make test-force-pl" in RPM builds.
+ print "Logging: $0 ", join(" ", @ARGV), "\n";
+
# Read the command line
# Note: Keep list, and the order, in sync with usage at end of this file
@@ -575,12 +579,10 @@ sub command_line_setup () {
# Control what test suites or cases to run
'force' => \$opt_force,
'with-ndbcluster' => \$opt_with_ndbcluster,
+ 'with-ndbcluster-only' => \$opt_with_ndbcluster_only,
'skip-ndbcluster|skip-ndb' => \$opt_skip_ndbcluster,
- 'with-ndbcluster-slave' => \$opt_with_ndbcluster_slave,
'skip-ndbcluster-slave|skip-ndb-slave'
=> \$opt_skip_ndbcluster_slave,
- 'with-ndbcluster-all' => \$opt_with_ndbcluster_all,
- 'with-ndbcluster-only' => \$opt_with_ndbcluster_only,
'ndb-extra-test' => \$opt_ndb_extra_test,
'skip-master-binlog' => \$opt_skip_master_binlog,
'skip-slave-binlog' => \$opt_skip_slave_binlog,
@@ -634,6 +636,7 @@ sub command_line_setup () {
'valgrind-mysqld' => \$opt_valgrind_mysqld,
'valgrind-options=s' => \$opt_valgrind_options,
'valgrind-path=s' => \$opt_valgrind_path,
+ 'callgrind' => \$opt_callgrind,
# Stress testing
'stress' => \$opt_stress,
@@ -655,12 +658,11 @@ sub command_line_setup () {
'comment=s' => \$opt_comment,
'debug' => \$opt_debug,
'fast' => \$opt_fast,
- 'local' => \$opt_local,
- 'local-master' => \$opt_local_master,
'netware' => \$opt_netware,
- 'old-master' => \$opt_old_master,
'reorder' => \$opt_reorder,
+ 'enable-disabled' => \$opt_enable_disabled,
'script-debug' => \$opt_script_debug,
+ 'verbose' => \$opt_verbose,
'sleep=i' => \$opt_sleep,
'socket=s' => \$opt_socket,
'start-dirty' => \$opt_start_dirty,
@@ -710,6 +712,21 @@ sub command_line_setup () {
}
# --------------------------------------------------------------------------
+  # Find out the type of logging that is being used
+ # --------------------------------------------------------------------------
+
+ # NOTE if the default binlog format is changed, this has to be changed
+ $used_binlog_format= "stmt";
+ foreach my $arg ( @opt_extra_mysqld_opt )
+ {
+ if ( defined mtr_match_substring($arg,"binlog-format=row"))
+ {
+ $used_binlog_format= "row";
+ }
+ }
+ mtr_report("Using binlog format '$used_binlog_format'");
+
+ # --------------------------------------------------------------------------
# Set the "var/" directory, as it is the base for everything else
# --------------------------------------------------------------------------
@@ -740,11 +757,6 @@ sub command_line_setup () {
# Do sanity checks of command line arguments
# --------------------------------------------------------------------------
- if ( $opt_extern and $opt_local )
- {
- mtr_error("Can't use --extern and --local at the same time");
- }
-
if ( ! $opt_socket )
{ # FIXME set default before reading options?
# $opt_socket= '@MYSQL_UNIX_ADDR@';
@@ -765,8 +777,8 @@ sub command_line_setup () {
$glob_use_embedded_server= 1;
push(@glob_test_mode, "embedded");
$opt_skip_rpl= 1; # We never run replication with embedded
- $opt_skip_ndbcluster= 1; # Avoid auto detection
- $opt_skip_ssl= 1;
+ $opt_skip_ndbcluster= 1; # Turn off use of NDB cluster
+ $opt_skip_ssl= 1; # Turn off use of SSL
if ( $opt_extern )
{
@@ -779,11 +791,40 @@ sub command_line_setup () {
push(@glob_test_mode, "ps-protocol");
}
- # FIXME don't understand what this is
-# if ( $opt_local_master )
-# {
-# $opt_master_myport= 3306;
-# }
+ if ( $opt_with_ndbcluster and $opt_skip_ndbcluster)
+ {
+ mtr_error("Can't specify both --with-ndbcluster and --skip-ndbcluster");
+ }
+
+ if ( $opt_ndbconnectstring )
+ {
+ $glob_use_running_ndbcluster= 1;
+ mtr_error("Can't specify --ndb-connectstring and --skip-ndbcluster")
+ if $opt_skip_ndbcluster;
+ mtr_error("Can't specify --ndb-connectstring and --ndbcluster-port")
+ if $opt_ndbcluster_port;
+ }
+ else
+ {
+ # Set default connect string
+ $opt_ndbconnectstring= "host=localhost:$opt_ndbcluster_port";
+ }
+
+ if ( $opt_ndbconnectstring_slave )
+ {
+ $glob_use_running_ndbcluster_slave= 1;
+    mtr_error("Can't specify --ndb-connectstring-slave and " .
+ "--skip-ndbcluster-slave")
+ if $opt_skip_ndbcluster;
+ mtr_error("Can't specify --ndb-connectstring-slave and " .
+ "--ndbcluster-port-slave")
+ if $opt_ndbcluster_port_slave;
+ }
+ else
+ {
+ # Set default connect string
+ $opt_ndbconnectstring_slave= "host=localhost:$opt_ndbcluster_port_slave";
+ }
if ( $opt_small_bench )
{
@@ -834,6 +875,17 @@ sub command_line_setup () {
$opt_valgrind= 1;
}
+ if ( $opt_callgrind )
+ {
+ mtr_report("Turning on valgrind with callgrind for mysqld(s)");
+ $opt_valgrind= 1;
+ $opt_valgrind_mysqld= 1;
+
+ # Set special valgrind options unless options passed on command line
+ $opt_valgrind_options="--trace-children=yes"
+ unless defined $opt_valgrind_options;
+ }
+
if ( $opt_valgrind )
{
# Set valgrind_options to default unless already defined
@@ -884,61 +936,78 @@ sub command_line_setup () {
$master->[0]=
{
+ type => "master",
+ idx => 0,
path_myddir => "$opt_vardir/master-data",
path_myerr => "$opt_vardir/log/master.err",
path_mylog => "$opt_vardir/log/master.log",
- path_mypid => "$opt_vardir/run/master.pid",
- path_mysock => "$sockdir/master.sock",
- path_myport => $opt_master_myport,
+ path_pid => "$opt_vardir/run/master.pid",
+ path_sock => "$sockdir/master.sock",
+ port => $opt_master_myport,
start_timeout => 400, # enough time create innodb tables
-
- ndbcluster => 1, # ndbcluster not started
+ cluster => 0, # index in clusters list
+ start_opts => [],
};
$master->[1]=
{
+ type => "master",
+ idx => 1,
path_myddir => "$opt_vardir/master1-data",
path_myerr => "$opt_vardir/log/master1.err",
path_mylog => "$opt_vardir/log/master1.log",
- path_mypid => "$opt_vardir/run/master1.pid",
- path_mysock => "$sockdir/master1.sock",
- path_myport => $opt_master_myport + 1,
+ path_pid => "$opt_vardir/run/master1.pid",
+ path_sock => "$sockdir/master1.sock",
+ port => $opt_master_myport + 1,
start_timeout => 400, # enough time create innodb tables
+ cluster => 0, # index in clusters list
+ start_opts => [],
};
$slave->[0]=
{
+ type => "slave",
+ idx => 0,
path_myddir => "$opt_vardir/slave-data",
path_myerr => "$opt_vardir/log/slave.err",
path_mylog => "$opt_vardir/log/slave.log",
- path_mypid => "$opt_vardir/run/slave.pid",
- path_mysock => "$sockdir/slave.sock",
- path_myport => $opt_slave_myport,
+ path_pid => "$opt_vardir/run/slave.pid",
+ path_sock => "$sockdir/slave.sock",
+ port => $opt_slave_myport,
start_timeout => 400,
- ndbcluster => 1, # ndbcluster not started
+ cluster => 1, # index in clusters list
+ start_opts => [],
};
$slave->[1]=
{
+ type => "slave",
+ idx => 1,
path_myddir => "$opt_vardir/slave1-data",
path_myerr => "$opt_vardir/log/slave1.err",
path_mylog => "$opt_vardir/log/slave1.log",
- path_mypid => "$opt_vardir/run/slave1.pid",
- path_mysock => "$sockdir/slave1.sock",
- path_myport => $opt_slave_myport + 1,
+ path_pid => "$opt_vardir/run/slave1.pid",
+ path_sock => "$sockdir/slave1.sock",
+ port => $opt_slave_myport + 1,
start_timeout => 300,
+ cluster => -1, # index in clusters list
+ start_opts => [],
};
$slave->[2]=
{
+ type => "slave",
+ idx => 2,
path_myddir => "$opt_vardir/slave2-data",
path_myerr => "$opt_vardir/log/slave2.err",
path_mylog => "$opt_vardir/log/slave2.log",
- path_mypid => "$opt_vardir/run/slave2.pid",
- path_mysock => "$sockdir/slave2.sock",
- path_myport => $opt_slave_myport + 2,
+ path_pid => "$opt_vardir/run/slave2.pid",
+ path_sock => "$sockdir/slave2.sock",
+ port => $opt_slave_myport + 2,
start_timeout => 300,
+ cluster => -1, # index in clusters list
+ start_opts => [],
};
$instance_manager=
@@ -978,11 +1047,53 @@ sub command_line_setup () {
old_log_format => 1
};
+ my $data_dir= "$opt_vardir/ndbcluster-$opt_ndbcluster_port";
+ $clusters->[0]=
+ {
+ name => "Master",
+ nodes => 2,
+ port => "$opt_ndbcluster_port",
+ data_dir => "$data_dir",
+ connect_string => "$opt_ndbconnectstring",
+ path_pid => "$data_dir/ndb_3.pid", # Nodes + 1
+ pid => 0, # pid of ndb_mgmd
+ installed_ok => 'NO',
+ };
+
+ $data_dir= "$opt_vardir/ndbcluster-$opt_ndbcluster_port_slave";
+ $clusters->[1]=
+ {
+ name => "Slave",
+ nodes => 1,
+ port => "$opt_ndbcluster_port_slave",
+ data_dir => "$data_dir",
+ connect_string => "$opt_ndbconnectstring_slave",
+ path_pid => "$data_dir/ndb_2.pid", # Nodes + 1
+ pid => 0, # pid of ndb_mgmd
+ installed_ok => 'NO',
+ };
+
+ # Init pids of ndbd's
+ foreach my $cluster ( @{$clusters} )
+ {
+ for ( my $idx= 0; $idx < $cluster->{'nodes'}; $idx++ )
+ {
+ my $nodeid= $idx+1;
+ $cluster->{'ndbds'}->[$idx]=
+ {
+ pid => 0,
+ nodeid => $nodeid,
+ path_pid => "$cluster->{'data_dir'}/ndb_${nodeid}.pid",
+ path_fs => "$cluster->{'data_dir'}/ndb_${nodeid}_fs",
+ };
+ }
+ }
+
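As an illustration only (placeholder values, not literal output), the slave cluster entry built above ends up shaped roughly like this:

    # $clusters->[1] is approximately:
    # {
    #   name           => "Slave",
    #   nodes          => 1,
    #   port           => $opt_ndbcluster_port_slave,
    #   data_dir       => "$opt_vardir/ndbcluster-<port>",
    #   connect_string => "host=localhost:<port>",
    #   path_pid       => "<data_dir>/ndb_2.pid",   # nodes + 1
    #   pid            => 0,                        # pid of ndb_mgmd
    #   installed_ok   => 'NO',
    #   ndbds          => [ { nodeid => 1, pid => 0,
    #                         path_pid => "<data_dir>/ndb_1.pid",
    #                         path_fs  => "<data_dir>/ndb_1_fs" } ],
    # }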
if ( $opt_extern )
{
$glob_use_running_server= 1;
$opt_skip_rpl= 1; # We don't run rpl test cases
- $master->[0]->{'path_mysock'}= $opt_socket;
+ $master->[0]->{'path_sock'}= $opt_socket;
}
$path_timefile= "$opt_vardir/log/mysqltest-time";
@@ -1111,6 +1222,9 @@ sub executable_setup () {
"/usr/bin/false");
$path_ndb_tools_dir= mtr_path_exists("$glob_basedir/storage/ndb/tools");
$exe_ndb_mgm= "$glob_basedir/storage/ndb/src/mgmclient/ndb_mgm";
+ $exe_ndb_waiter= "$glob_basedir/storage/ndb/tools/ndb_waiter";
+ $exe_ndbd= "$glob_basedir/storage/ndb/src/kernel/ndbd";
+ $exe_ndb_mgmd= "$glob_basedir/storage/ndb/src/mgmsrv/ndb_mgmd";
$lib_udf_example=
mtr_file_exists("$glob_basedir/sql/.libs/udf_example.so");
}
@@ -1170,13 +1284,14 @@ sub executable_setup () {
$path_ndb_tools_dir= "$glob_basedir/bin";
$exe_ndb_mgm= "$glob_basedir/bin/ndb_mgm";
+ $exe_ndb_waiter= "$glob_basedir/bin/ndb_waiter";
+ $exe_ndbd= "$glob_basedir/libexec/ndbd";
+ $exe_ndb_mgmd= "$glob_basedir/libexec/ndb_mgmd";
}
$exe_master_mysqld= $exe_master_mysqld || $exe_mysqld;
$exe_slave_mysqld= $exe_slave_mysqld || $exe_mysqld;
- $path_ndb_data_dir= "$opt_vardir/ndbcluster-$opt_ndbcluster_port";
- $path_ndb_slave_data_dir= "$opt_vardir/ndbcluster-$opt_ndbcluster_port_slave";
$file_ndb_testrun_log= "$opt_vardir/log/ndb_testrun.log";
}
@@ -1227,13 +1342,13 @@ sub environment_setup () {
$ENV{'MYSQL_TEST_DIR'}= $glob_mysql_test_dir;
$ENV{'MYSQLTEST_VARDIR'}= $opt_vardir;
$ENV{'MYSQL_TMP_DIR'}= $opt_tmpdir;
- $ENV{'MASTER_MYSOCK'}= $master->[0]->{'path_mysock'};
- $ENV{'MASTER_MYSOCK1'}= $master->[1]->{'path_mysock'};
- $ENV{'MASTER_MYPORT'}= $master->[0]->{'path_myport'};
- $ENV{'MASTER_MYPORT1'}= $master->[1]->{'path_myport'};
- $ENV{'SLAVE_MYPORT'}= $slave->[0]->{'path_myport'};
- $ENV{'SLAVE_MYPORT1'}= $slave->[1]->{'path_myport'};
- $ENV{'SLAVE_MYPORT2'}= $slave->[2]->{'path_myport'};
+ $ENV{'MASTER_MYSOCK'}= $master->[0]->{'path_sock'};
+ $ENV{'MASTER_MYSOCK1'}= $master->[1]->{'path_sock'};
+ $ENV{'MASTER_MYPORT'}= $master->[0]->{'port'};
+ $ENV{'MASTER_MYPORT1'}= $master->[1]->{'port'};
+ $ENV{'SLAVE_MYPORT'}= $slave->[0]->{'port'};
+ $ENV{'SLAVE_MYPORT1'}= $slave->[1]->{'port'};
+ $ENV{'SLAVE_MYPORT2'}= $slave->[2]->{'port'};
# $ENV{'MYSQL_TCP_PORT'}= '@MYSQL_TCP_PORT@'; # FIXME
$ENV{'MYSQL_TCP_PORT'}= 3306;
@@ -1290,7 +1405,7 @@ sub signal_setup () {
sub handle_int_signal () {
$SIG{INT}= 'DEFAULT'; # If we get a ^C again, we die...
mtr_warning("got INT signal, cleaning up.....");
- stop_masters_slaves();
+ stop_all_servers();
mtr_error("We die from ^C signal from user");
}
@@ -1306,11 +1421,11 @@ sub kill_running_server () {
if ( $opt_fast or $glob_use_embedded_server )
{
# FIXME is embedded server really using PID files?!
- unlink($master->[0]->{'path_mypid'});
- unlink($master->[1]->{'path_mypid'});
- unlink($slave->[0]->{'path_mypid'});
- unlink($slave->[1]->{'path_mypid'});
- unlink($slave->[2]->{'path_mypid'});
+ unlink($master->[0]->{'path_pid'});
+ unlink($master->[1]->{'path_pid'});
+ unlink($slave->[0]->{'path_pid'});
+ unlink($slave->[1]->{'path_pid'});
+ unlink($slave->[2]->{'path_pid'});
}
else
{
@@ -1323,13 +1438,7 @@ sub kill_running_server () {
mkpath("$opt_vardir/log"); # Needed for mysqladmin log
mtr_kill_leftovers();
- $using_ndbcluster_master= $opt_with_ndbcluster;
- ndbcluster_stop();
- $master->[0]->{'ndbcluster'}= 1;
- $using_ndbcluster_slave= $opt_with_ndbcluster;
- ndbcluster_stop_slave();
- $slave->[0]->{'ndbcluster'}= 1;
- }
+ }
}
sub kill_and_cleanup () {
@@ -1467,6 +1576,29 @@ sub check_ssl_support () {
}
+sub check_debug_support () {
+
+ # check debug support by testing using a switch
+ # that is only available in that case
+ if ( mtr_run($exe_mysqld,
+ ["--no-defaults",
+ "--debug",
+ "--help"],
+ "", "/dev/null", "/dev/null", "") != 0 )
+ {
+ # mtr_report("Binaries are not debug compiled");
+ $debug_compiled_binaries= 0;
+
+ if ( $opt_debug )
+ {
+      mtr_error("Can't use --debug, binaries do not support it");
+ }
+ return;
+ }
+ mtr_report("Binaries are debug compiled");
+ $debug_compiled_binaries= 1;
+}
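For illustration, the same probe pattern in reduced form (a sketch; any switch that only a debug build accepts would do):

    # Sketch only: run the binary with a debug-only option and check the
    # exit status; non-zero means the option is not supported.
    my $rc= system("$exe_mysqld --no-defaults --debug --help > /dev/null 2>&1");
    my $debug_compiled= ($rc == 0) ? 1 : 0;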
+
##############################################################################
#
# Start the ndb cluster
@@ -1479,12 +1611,10 @@ sub check_ndbcluster_support () {
{
mtr_report("Skipping ndbcluster");
$opt_skip_ndbcluster_slave= 1;
- $opt_with_ndbcluster= 0;
- $opt_with_ndbcluster_slave= 0;
return;
}
- # check ndbcluster support by testing using a switch
+  # check ndbcluster support by running mysqld using a switch
# that is only available in that case
if ( mtr_run($exe_mysqld,
["--no-defaults",
@@ -1495,221 +1625,227 @@ sub check_ndbcluster_support () {
mtr_report("Skipping ndbcluster, mysqld not compiled with ndbcluster");
$opt_skip_ndbcluster= 1;
$opt_skip_ndbcluster_slave= 1;
- $opt_with_ndbcluster= 0;
- $opt_with_ndbcluster_slave= 0;
return;
}
+ $opt_ndbcluster_supported= 1;
+ mtr_report("Using ndbcluster when necessary, mysqld supports it");
+ return;
+}
- mtr_report("Using ndbcluster if necessary, mysqld supports it");
- $opt_with_ndbcluster= 1;
- if ( $opt_ndbconnectstring )
- {
- $glob_use_running_ndbcluster= 1;
- }
- else
- {
- $opt_ndbconnectstring= "host=localhost:$opt_ndbcluster_port";
- }
- if ( $opt_skip_ndbcluster_slave )
- {
- $opt_with_ndbcluster_slave= 0;
- }
- else
+sub ndbcluster_start_install ($) {
+ my $cluster= shift;
+
+ if ( $opt_skip_ndbcluster or $glob_use_running_ndbcluster )
{
- $opt_with_ndbcluster_slave= 1;
- if ( $opt_ndbconnectstring_slave )
- {
- $glob_use_running_ndbcluster_slave= 1;
- }
- else
- {
- $opt_ndbconnectstring_slave= "host=localhost:$opt_ndbcluster_port_slave";
- }
+ return 0;
}
- return;
-}
+ mtr_report("Installing $cluster->{'name'} Cluster");
+ mkdir($cluster->{'data_dir'});
-sub ndbcluster_install () {
+ # Create a config file from template
+ my $ndb_no_ord=512;
+ my $ndb_no_attr=2048;
+ my $ndb_con_op=105000;
+ my $ndb_dmem="80M";
+ my $ndb_imem="24M";
+ my $ndb_pbmem="32M";
+ my $nodes= $cluster->{'nodes'};
+ my $ndb_host= "localhost";
+ my $ndb_diskless= 0;
- if ( ! $opt_with_ndbcluster or $glob_use_running_ndbcluster )
+ if (!$opt_bench)
{
- return 0;
+ # Use a smaller configuration
+ $ndb_no_ord=32;
+ $ndb_con_op=5000;
+ $ndb_dmem="20M";
+ $ndb_imem="1M";
+ $ndb_pbmem="4M";
}
- mtr_report("Installing ndbcluster master");
- my $ndbcluster_opts= $opt_bench ? "" : "--small";
- if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
- ["--port=$opt_ndbcluster_port",
- "--data-dir=$opt_vardir",
- "--verbose=2",
- $ndbcluster_opts,
- "--initial",
- "--relative-config-data-dir",
- "--core"],
- "", "", "", "") )
+
+ my $config_file_template= "ndb/ndb_config_${nodes}_node.ini";
+ my $config_file= "$cluster->{'data_dir'}/config.ini";
+
+ open(IN, $config_file_template)
+ or mtr_error("Can't open $config_file_template: $!");
+ open(OUT, ">", $config_file)
+ or mtr_error("Can't write to $config_file: $!");
+ while (<IN>)
{
- return 1;
+ chomp;
+
+ s/CHOOSE_MaxNoOfAttributes/$ndb_no_attr/;
+ s/CHOOSE_MaxNoOfOrderedIndexes/$ndb_no_ord/;
+ s/CHOOSE_MaxNoOfConcurrentOperations/$ndb_con_op/;
+ s/CHOOSE_DataMemory/$ndb_dmem/;
+ s/CHOOSE_IndexMemory/$ndb_imem/;
+ s/CHOOSE_Diskless/$ndb_diskless/;
+ s/CHOOSE_HOSTNAME_.*/$ndb_host/;
+ s/CHOOSE_FILESYSTEM/$cluster->{'data_dir'}/;
+ s/CHOOSE_PORT_MGM/$cluster->{'port'}/;
+ s/CHOOSE_DiskPageBufferMemory/$ndb_pbmem/;
+
+ print OUT "$_ \n";
}
+ close OUT;
+ close IN;
+
+
+ # Start cluster with "--initial"
- $using_ndbcluster_master= 1;
- ndbcluster_stop();
- $master->[0]->{'ndbcluster'}= 1;
+ ndbcluster_start($cluster, "--initial");
return 0;
}
-sub ndbcluster_start ($) {
- my $use_ndbcluster= shift;
+sub ndbcluster_wait_started($$){
+ my $cluster= shift;
+ my $ndb_waiter_extra_opt= shift;
+ my $path_waiter_log= "$cluster->{'data_dir'}/ndb_waiter.log";
+ my $args;
- if ( ! $use_ndbcluster )
- {
- $using_ndbcluster_master= 0;
- return 0;
- }
- if ( $glob_use_running_ndbcluster )
- {
- $using_ndbcluster_master= 1;
- return 0;
- }
- if ( $using_ndbcluster_master )
- {
- # Master already started
- return 0;
- }
- # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
- #mtr_report("Starting ndbcluster master");
- if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
- ["--port=$opt_ndbcluster_port",
- "--data-dir=$opt_vardir",
- "--character-sets-dir=$path_charsetsdir",
- "--verbose=2",
- "--core"],
- "", "/dev/null", "", "") )
+ mtr_init_args(\$args);
+
+ mtr_add_arg($args, "--no-defaults");
+ mtr_add_arg($args, "--core");
+ mtr_add_arg($args, "--ndb-connectstring=%s", $cluster->{'connect_string'});
+ mtr_add_arg($args, "--timeout=60");
+
+ if ($ndb_waiter_extra_opt)
{
- mtr_error("Error ndbcluster_start");
- $using_ndbcluster_master= 0;
- return 1;
+ mtr_add_arg($args, "$ndb_waiter_extra_opt");
}
- $using_ndbcluster_master= 1;
- return 0;
+  # Start the ndb_waiter which will connect to the ndb_mgmd
+  # and poll it for the state of the ndbd's; it will return when
+  # all nodes in the cluster are started
+ my $res= mtr_run($exe_ndb_waiter, $args,
+ "", $path_waiter_log, $path_waiter_log, "");
+ mtr_verbose("ndbcluster_wait_started, returns: $res") if $res;
+ return $res;
}
-sub rm_ndbcluster_tables ($) {
- my $dir= shift;
- foreach my $bin ( glob("$dir/cluster/apply_status*"),
- glob("$dir/cluster/schema*") )
- {
- unlink($bin);
- }
+
+
+sub mysqld_wait_started($){
+ my $mysqld= shift;
+
+ my $res= sleep_until_file_created($mysqld->{'path_pid'},
+ $mysqld->{'start_timeout'},
+ $mysqld->{'pid'});
+ return $res == 0;
}
-sub ndbcluster_stop () {
- if ( ! $using_ndbcluster_master or $glob_use_running_ndbcluster )
+sub ndb_mgmd_start ($) {
+ my $cluster= shift;
+
+ my $args; # Arg vector
+ my $pid= -1;
+
+ mtr_init_args(\$args);
+ mtr_add_arg($args, "--no-defaults");
+ mtr_add_arg($args, "--core");
+ mtr_add_arg($args, "--nodaemon");
+ mtr_add_arg($args, "--config-file=%s", "$cluster->{'data_dir'}/config.ini");
+
+
+ my $path_ndb_mgmd_log= "$cluster->{'data_dir'}/\l$cluster->{'name'}_ndb_mgmd.log";
+ $pid= mtr_spawn($exe_ndb_mgmd, $args, "",
+ $path_ndb_mgmd_log,
+ $path_ndb_mgmd_log,
+ "",
+ { append_log_file => 1 });
+
+
+ # FIXME Should not be needed
+ # Unfortunately the cluster nodes will fail to start
+ # if ndb_mgmd has not started properly
+ while (ndbcluster_wait_started($cluster, "--no-contact"))
{
- $using_ndbcluster_master= 0;
- return;
+ select(undef, undef, undef, 0.1);
}
- # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
- #mtr_report("Stopping ndbcluster master");
- mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
- ["--port=$opt_ndbcluster_port",
- "--data-dir=$opt_vardir",
- "--verbose=2",
- "--stop"],
- "", "/dev/null", "", "");
-
- rm_ndbcluster_tables ($master->[0]->{'path_myddir'});
- rm_ndbcluster_tables ($master->[1]->{'path_myddir'});
- $using_ndbcluster_master= 0;
- return;
+
+ # Remember pid of ndb_mgmd
+ $cluster->{'pid'}= $pid;
+
+ mtr_verbose("ndb_mgmd_start, pid: $pid");
+
+ return $pid;
}
-sub ndbcluster_install_slave () {
- if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
- {
- return 0;
- }
- mtr_report("Installing ndbcluster slave");
- if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
- ["--port=$opt_ndbcluster_port_slave",
- "--data-dir=$opt_vardir",
- "--verbose=2",
- "--small",
- "--ndbd-nodes=1",
- "--initial",
- "--relative-config-data-dir",
- "--core"],
- "", "", "", "") )
- {
- return 1;
- }
+sub ndbd_start ($$$) {
+ my $cluster= shift;
+ my $idx= shift;
+ my $extra_args= shift;
- $using_ndbcluster_slave= 1;
- ndbcluster_stop_slave();
- $slave->[0]->{'ndbcluster'}= 1;
+ my $args; # Arg vector
+ my $pid= -1;
- return 0;
+ mtr_init_args(\$args);
+ mtr_add_arg($args, "--no-defaults");
+ mtr_add_arg($args, "--core");
+ mtr_add_arg($args, "--ndb-connectstring=%s", "$cluster->{'connect_string'}");
+ mtr_add_arg($args, "--character-sets-dir=%s", "$path_charsetsdir");
+ mtr_add_arg($args, "--nodaemon");
+ mtr_add_arg($args, "$extra_args");
+
+ my $nodeid= $cluster->{'ndbds'}->[$idx]->{'nodeid'};
+ my $path_ndbd_log= "$cluster->{'data_dir'}/ndb_${nodeid}.log";
+ $pid= mtr_spawn($exe_ndbd, $args, "",
+ $path_ndbd_log,
+ $path_ndbd_log,
+ "",
+ { append_log_file => 1 });
+
+ # Add pid to list of pids for this cluster
+ $cluster->{'ndbds'}->[$idx]->{'pid'}= $pid;
+
+ mtr_verbose("ndbd_start, pid: $pid");
+
+ return $pid;
}
-sub ndbcluster_start_slave ($) {
- my $use_ndbcluster= shift;
- if ( ! $use_ndbcluster )
+sub ndbcluster_start ($$) {
+ my $cluster= shift;
+ my $extra_args= shift;
+
+ mtr_verbose("ndbcluster_start '$cluster->{'name'}'");
+
+ if ( $glob_use_running_ndbcluster )
{
- $using_ndbcluster_slave= 0;
return 0;
}
- if ( $glob_use_running_ndbcluster_slave )
+
+ if ( $cluster->{'pid'} )
{
- $using_ndbcluster_slave= 1;
- return 0;
+ mtr_error("Cluster '$cluster->{'name'}' already started");
}
- # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
- #mtr_report("Starting ndbcluster slave");
- if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
- ["--port=$opt_ndbcluster_port_slave",
- "--data-dir=$opt_vardir",
- "--verbose=2",
- "--ndbd-nodes=1",
- "--core"],
- "", "/dev/null", "", "") )
+ my $pid= ndb_mgmd_start($cluster);
+
+ for ( my $idx= 0; $idx < $cluster->{'nodes'}; $idx++ )
{
- mtr_error("Error ndbcluster_start_slave");
- $using_ndbcluster_slave= 0;
- return 1;
+ ndbd_start($cluster, $idx, $extra_args);
}
- $using_ndbcluster_slave= 1;
return 0;
}
-sub ndbcluster_stop_slave () {
- if ( ! $using_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
+sub rm_ndbcluster_tables ($) {
+ my $dir= shift;
+ foreach my $bin ( glob("$dir/cluster/apply_status*"),
+ glob("$dir/cluster/schema*") )
{
- $using_ndbcluster_slave= 0;
- return;
+ unlink($bin);
}
- # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
- #mtr_report("Stopping ndbcluster slave");
- mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
- ["--port=$opt_ndbcluster_port_slave",
- "--data-dir=$opt_vardir",
- "--verbose=2",
- "--stop"],
- "", "/dev/null", "", "");
-
- rm_ndbcluster_tables ($slave->[0]->{'path_myddir'});
-
- $using_ndbcluster_slave= 0;
- return;
}
@@ -1724,10 +1860,9 @@ sub run_benchmarks ($) {
my $args;
- if ( ! $glob_use_embedded_server and ! $opt_local_master )
+ if ( ! $glob_use_embedded_server )
{
- $master->[0]->{'pid'}= mysqld_start('master',0,[],[],
- $using_ndbcluster_master);
+ mysqld_start($master->[0],[],[]);
if ( ! $master->[0]->{'pid'} )
{
mtr_error("Can't start the mysqld server");
@@ -1736,7 +1871,7 @@ sub run_benchmarks ($) {
mtr_init_args(\$args);
- mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_mysock'});
+ mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_sock'});
mtr_add_arg($args, "--user=%s", $opt_user);
if ( $opt_small_bench )
@@ -1794,12 +1929,12 @@ sub run_suite () {
mtr_print_thick_line();
- mtr_report("Finding Tests in the '$suite' suite");
-
mtr_timer_start($glob_timers,"suite", 60 * $opt_suite_timeout);
mtr_report("Starting Tests in the '$suite' suite");
+ mtr_report_tests_not_skipped_though_disabled($tests);
+
mtr_print_header();
foreach my $tinfo ( @$tests )
@@ -1815,7 +1950,7 @@ sub run_suite () {
! $glob_use_running_server and
! $glob_use_embedded_server )
{
- stop_masters_slaves();
+ stop_all_servers();
}
if ( $opt_gcov )
@@ -1877,42 +2012,50 @@ sub mysql_install_db () {
im_prepare_env($instance_manager);
}
- if ( ndbcluster_install() )
+
+ my $cluster_started_ok= 1; # Assume it can be started
+
+
+ if (ndbcluster_start_install($clusters->[0]) ||
+ $use_slaves && ndbcluster_start_install($clusters->[1]))
+ {
+ mtr_warning("Failed to start install of cluster");
+ $cluster_started_ok= 0;
+ }
+
+
+ foreach my $cluster (@{$clusters})
{
- if ( $opt_force)
+
+ next if !$cluster->{'pid'};
+
+    $cluster->{'installed_ok'}= "YES"; # Assume install succeeds
+
+ if (ndbcluster_wait_started($cluster, ""))
{
# failed to install, disable usage and flag that its no ok
- mtr_report("ndbcluster_install failed, continuing without cluster");
- $opt_with_ndbcluster= 0;
- $flag_ndb_status_ok= 0;
- $ENV{'NDB_STATUS_OK'}= "NO";
- }
- else
- {
- print "Aborting: Failed to install ndb cluster\n";
- print "To continue, re-run with '--force'.\n";
- mtr_exit(1);
+ mtr_report("ndbcluster_install of $cluster->{'name'} failed");
+ $cluster->{"installed_ok"}= "NO";
+
+ $cluster_started_ok= 0;
}
}
- if ( $use_slaves and ndbcluster_install_slave() )
+ if ( ! $cluster_started_ok )
{
if ( $opt_force)
{
- # failed to install, disable usage and flag that its no ok
- mtr_report("ndbcluster_install_slave failed, " .
- "continuing without slave cluster");
- $opt_with_ndbcluster_slave= 0;
- $flag_ndb_slave_status_ok= 0;
+ # Continue without cluster
}
else
{
- print "Aborting: Failed to install ndb cluster\n";
- print "To continue, re-run with '--force'.\n";
- mtr_exit(1);
+ mtr_error("To continue, re-run with '--force'.");
}
}
+ # Stop clusters...
+ stop_all_servers();
+
return 0;
}
@@ -2101,8 +2244,6 @@ sub run_testcase ($) {
my $tname= $tinfo->{'name'};
- my $ndbcluster_opt;
-
mtr_tonewfile($path_current_test_log,"$tname\n"); # Always tell where we are
# output current test to ndbcluster log file to enable diagnostics
@@ -2122,68 +2263,25 @@ sub run_testcase ($) {
return;
}
- if ( $tinfo->{'ndb_test'} and ! $flag_ndb_status_ok )
+ # If test needs cluster, check that master installed ok
+ if ( $tinfo->{'ndb_test'} and $clusters->[0]->{'installed_ok'} eq "NO" )
{
mtr_report_test_name($tinfo);
mtr_report_test_failed($tinfo);
return;
}
- # ----------------------------------------------------------------------
- # If not using a running servers we may need to stop and restart.
- # We restart in the case we have initiation scripts, server options
- # etc to run. But we also restart again after the test first restart
- # and test is run, to get back to normal server settings.
- #
- # To make the code a bit more clean, we actually only stop servers
- # here, and mark this to be done. Then a generic "start" part will
- # start up the needed servers again.
- # ----------------------------------------------------------------------
-
- if ( ! $glob_use_running_server and ! $glob_use_embedded_server )
+ # If test needs slave cluster, check that it installed ok
+ if ( $tinfo->{'ndb_test'} and $tinfo->{'slave_num'} and
+ $clusters->[1]->{'installed_ok'} eq "NO" )
{
- # We try to find out if we are to restart the server
- my $do_restart= 0; # Assumes we don't have to
-
- if ( $tinfo->{'master_sh'} )
- {
- $do_restart= 1; # Always restart if script to run
- }
- elsif ( $opt_with_ndbcluster and $tinfo->{'ndb_test'} != $using_ndbcluster_master )
- {
- $do_restart= 1; # Restart without cluster
- }
- elsif ( $master->[0]->{'running_master_is_special'} and
- $master->[0]->{'running_master_is_special'}->{'timezone'} eq
- $tinfo->{'timezone'} and
- mtr_same_opts($master->[0]->{'running_master_is_special'}->{'master_opt'},
- $tinfo->{'master_opt'}) )
- {
- # If running master was started with special settings, but
- # the current test requuires the same ones, we *don't* restart.
- $do_restart= 0;
- }
- elsif ( $tinfo->{'master_restart'} or
- $master->[0]->{'running_master_is_special'} )
- {
- $do_restart= 1;
- }
-
- if ( $do_restart )
- {
- stop_masters();
- delete $master->[0]->{'running_master_is_special'}; # Forget history
- }
-
- # ----------------------------------------------------------------------
- # Always terminate all slaves, if any. Else we may have useless
- # reconnection attempts and error messages in case the slave and
- # master servers restart.
- # ----------------------------------------------------------------------
-
- stop_slaves();
+ mtr_report_test_name($tinfo);
+ mtr_report_test_failed($tinfo);
+ return;
}
+ run_testcase_stop_servers($tinfo);
+
# ----------------------------------------------------------------------
# Prepare to start masters. Even if we use embedded, we want to run
# the preparation.
@@ -2191,18 +2289,12 @@ sub run_testcase ($) {
$ENV{'TZ'}= $tinfo->{'timezone'};
- mtr_report_test_name($tinfo);
-
mtr_tofile($master->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n");
if ( $master->[1]->{'pid'} )
{
mtr_tofile($master->[1]->{'path_myerr'},"CURRENT_TEST: $tname\n");
}
- # FIXME test cases that depend on each other, prevent this from
- # being at this location.
- # do_before_start_master($tname,$tinfo->{'master_sh'});
-
# ----------------------------------------------------------------------
# If any mysqld servers running died, we have to know
# ----------------------------------------------------------------------
@@ -2210,126 +2302,9 @@ sub run_testcase ($) {
mtr_record_dead_children();
# ----------------------------------------------------------------------
- # Start masters
+ # Start masters needed by the testcase
# ----------------------------------------------------------------------
-
- if ( ! $glob_use_running_server and ! $glob_use_embedded_server )
- {
- # FIXME give the args to the embedded server?!
- # FIXME what does $opt_local_master mean?!
- # FIXME split up start and check that started so that can do
- # starts in parallel, masters and slaves at the same time.
-
- if ( $tinfo->{'component_id'} eq 'mysqld' and ! $opt_local_master )
- {
- if ( $opt_with_ndbcluster and $master->[0]->{'ndbcluster'} )
- {
- # Cluster is not started
-
- # Call ndbcluster_start to check if test case needs cluster
- # Start it if not already started
- $master->[0]->{'ndbcluster'}= ndbcluster_start($tinfo->{'ndb_test'});
- if ( $master->[0]->{'ndbcluster'} )
- {
- report_failure_and_restart($tinfo);
- return;
- }
- }
- if ( ! $master->[0]->{'pid'} )
- {
- # FIXME not correct location for do_before_start_master()
- do_before_start_master($tname,$tinfo->{'master_sh'});
- $master->[0]->{'pid'}=
- mysqld_start('master',0,$tinfo->{'master_opt'},[],
- $using_ndbcluster_master);
- if ( ! $master->[0]->{'pid'} )
- {
- report_failure_and_restart($tinfo);
- return;
- }
- }
- if ( $using_ndbcluster_master and ! $master->[1]->{'pid'} )
- {
- # Test needs cluster, start an extra mysqld connected to cluster
- # First wait for first mysql server to have created ndb system tables ok
- if ( ! sleep_until_file_created("$master->[0]->{'path_myddir'}/cluster/apply_status.ndb",
- $master->[0]->{'start_timeout'},
- $master->[0]->{'pid'}))
- {
- report_failure_and_restart($tinfo);
- return;
- }
- mtr_tofile($master->[1]->{'path_myerr'},"CURRENT_TEST: $tname\n");
- $master->[1]->{'pid'}=
- mysqld_start('master',1,$tinfo->{'master_opt'},[],
- $using_ndbcluster_master);
- if ( ! $master->[1]->{'pid'} )
- {
- report_failure_and_restart($tinfo);
- return;
- }
- }
-
- if ( $tinfo->{'master_restart'} )
- {
- # Save this test case information, so next can examine it
- $master->[0]->{'running_master_is_special'}= $tinfo;
- }
- }
- elsif ( ! $opt_skip_im and $tinfo->{'component_id'} eq 'im' )
- {
- # We have to create defaults file every time, in order to ensure that it
- # will be the same for each test. The problem is that test can change the
- # file (by SET/UNSET commands), so w/o recreating the file, execution of
- # one test can affect the other.
-
- im_create_defaults_file($instance_manager);
-
- im_start($instance_manager, $tinfo->{im_opts});
- }
-
- # ----------------------------------------------------------------------
- # Start slaves - if needed
- # ----------------------------------------------------------------------
-
- if ( $tinfo->{'slave_num'} )
- {
- mtr_tofile($slave->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n");
-
- do_before_start_slave($tname,$tinfo->{'slave_sh'});
-
- for ( my $idx= 0; $idx < $tinfo->{'slave_num'}; $idx++ )
- {
- if ( ! $slave->[$idx]->{'pid'} )
- {
- $ndbcluster_opt= 0;
- if ( $idx == 0)
- {
- if ( $slave->[0]->{'ndbcluster'} )
- {
- $slave->[0]->{'ndbcluster'}=
- ndbcluster_start_slave($tinfo->{'ndb_test'});
- if ( $slave->[0]->{'ndbcluster'} )
- {
- report_failure_and_restart($tinfo);
- return;
- }
- }
- $ndbcluster_opt= $using_ndbcluster_slave;
- }
- $slave->[$idx]->{'pid'}=
- mysqld_start('slave',$idx,
- $tinfo->{'slave_opt'}, $tinfo->{'slave_mi'},
- $ndbcluster_opt);
- if ( ! $slave->[$idx]->{'pid'} )
- {
- report_failure_and_restart($tinfo);
- return;
- }
- }
- }
- }
- }
+ run_testcase_start_servers($tinfo);
# ----------------------------------------------------------------------
# If --start-and-exit or --start-dirty given, stop here to let user manually
@@ -2346,6 +2321,8 @@ sub run_testcase ($) {
# Run the test case
# ----------------------------------------------------------------------
+ mtr_report_test_name($tinfo);
+
{
# remove the old reject file
if ( $opt_suite eq "main" )
@@ -2359,7 +2336,6 @@ sub run_testcase ($) {
unlink($path_timefile);
my $res= run_mysqltest($tinfo);
-
if ( $res == 0 )
{
mtr_report_test_passed($tinfo);
@@ -2367,6 +2343,8 @@ sub run_testcase ($) {
elsif ( $res == 62 )
{
# Testcase itself tell us to skip this one
+ # FIXME get reason to skip from mysqltest
+ $tinfo->{'comment'}= "Detected by testcase";
mtr_report_test_skipped($tinfo);
}
elsif ( $res == 63 )
@@ -2383,6 +2361,7 @@ sub run_testcase ($) {
"mysqltest returned unexpected code $res, " .
"it has probably crashed");
}
+
report_failure_and_restart($tinfo);
}
# Save info from this testcase run to mysqltest.log
@@ -2398,7 +2377,7 @@ sub run_testcase ($) {
if ( ! $glob_use_running_server and $tinfo->{'component_id'} eq 'im' and
$instance_manager->{'pid'} )
{
- im_stop($instance_manager);
+ im_stop($instance_manager, $tinfo->{'name'});
}
}
@@ -2449,8 +2428,6 @@ sub restore_installed_db ($) {
if ( -d $path_snapshot)
{
- kill_running_server ();
-
mtr_report("Restoring snapshot of databases");
foreach my $data_dir (@data_dir_lst)
@@ -2460,27 +2437,24 @@ sub restore_installed_db ($) {
rmtree("$data_dir");
mtr_copy_dir("$path_snapshot/$name", "$data_dir");
}
- if ($opt_with_ndbcluster)
- {
- # Remove the ndb_*_fs dirs, forcing a clean start of ndb
- rmtree("$path_ndb_data_dir/ndb_1_fs");
- rmtree("$path_ndb_data_dir/ndb_2_fs");
- if ( $opt_with_ndbcluster_slave )
+ # Remove the ndb_*_fs dirs for all ndbd nodes
+ # forcing a clean start of ndb
+ foreach my $cluster (@{$clusters})
+ {
+ foreach my $ndbd (@{$cluster->{'ndbds'}})
{
- # Remove also the ndb_*_fs dirs for slave cluster
- rmtree("$path_ndb_slave_data_dir/ndb_1_fs");
+ rmtree("$ndbd->{'path_fs'}" );
}
}
}
else
{
- # No snapshot existed, just stop all processes
- stop_masters_slaves();
+ # No snapshot existed
+ mtr_error("No snapshot existed");
}
}
-
sub report_failure_and_restart ($) {
my $tinfo= shift;
@@ -2489,6 +2463,9 @@ sub report_failure_and_restart ($) {
print "\n";
if ( $opt_force )
{
+ # Stop all servers that are known to be running
+ stop_all_servers();
+
# Restore the snapshot of the installed test db
restore_installed_db($tinfo->{'name'});
print "Resuming Tests\n\n";
@@ -2502,7 +2479,7 @@ sub report_failure_and_restart ($) {
! $glob_use_running_server and
! $glob_use_embedded_server )
{
- stop_masters_slaves();
+ stop_all_servers();
}
mtr_exit(1);
@@ -2596,18 +2573,17 @@ sub do_before_start_slave ($$) {
}
-sub mysqld_arguments ($$$$$$) {
+sub mysqld_arguments ($$$$$) {
my $args= shift;
- my $type= shift; # master/slave/bootstrap
+ my $type= shift;
my $idx= shift;
my $extra_opt= shift;
my $slave_master_info= shift;
- my $using_ndbcluster= shift;
my $sidx= ""; # Index as string, 0 is empty string
if ( $idx > 0 )
{
- $sidx= sprintf("%d", $idx); # sprintf not needed in Perl for this
+ $sidx= "$idx";
}
my $prefix= ""; # If mysqltest server arg
@@ -2647,12 +2623,12 @@ sub mysqld_arguments ($$$$$$) {
$opt_vardir, $sidx);
}
mtr_add_arg($args, "%s--pid-file=%s", $prefix,
- $master->[$idx]->{'path_mypid'});
+ $master->[$idx]->{'path_pid'});
mtr_add_arg($args, "%s--port=%d", $prefix,
- $master->[$idx]->{'path_myport'});
+ $master->[$idx]->{'port'});
mtr_add_arg($args, "%s--server-id=%d", $prefix, $id);
mtr_add_arg($args, "%s--socket=%s", $prefix,
- $master->[$idx]->{'path_mysock'});
+ $master->[$idx]->{'path_sock'});
mtr_add_arg($args, "%s--innodb_data_file_path=ibdata1:10M:autoextend", $prefix);
mtr_add_arg($args, "%s--local-infile", $prefix);
mtr_add_arg($args, "%s--datadir=%s", $prefix,
@@ -2663,7 +2639,9 @@ sub mysqld_arguments ($$$$$$) {
mtr_add_arg($args, "%s--skip-innodb", $prefix);
}
- if ( $opt_skip_ndbcluster || !$using_ndbcluster)
+ my $cluster= $clusters->[$master->[$idx]->{'cluster'}];
+ if ( $opt_skip_ndbcluster ||
+ !$cluster->{'pid'})
{
mtr_add_arg($args, "%s--skip-ndbcluster", $prefix);
}
@@ -2671,7 +2649,7 @@ sub mysqld_arguments ($$$$$$) {
{
mtr_add_arg($args, "%s--ndbcluster", $prefix);
mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
- $opt_ndbconnectstring);
+ $cluster->{'connect_string'});
mtr_add_arg($args, "%s--ndb-extra-logging", $prefix);
}
}
@@ -2697,14 +2675,14 @@ sub mysqld_arguments ($$$$$$) {
$slave->[$idx]->{'path_mylog'});
mtr_add_arg($args, "%s--master-retry-count=10", $prefix);
mtr_add_arg($args, "%s--pid-file=%s", $prefix,
- $slave->[$idx]->{'path_mypid'});
+ $slave->[$idx]->{'path_pid'});
mtr_add_arg($args, "%s--port=%d", $prefix,
- $slave->[$idx]->{'path_myport'});
+ $slave->[$idx]->{'port'});
mtr_add_arg($args, "%s--relay-log=%s/log/slave%s-relay-bin", $prefix,
$opt_vardir, $sidx);
mtr_add_arg($args, "%s--report-host=127.0.0.1", $prefix);
mtr_add_arg($args, "%s--report-port=%d", $prefix,
- $slave->[$idx]->{'path_myport'});
+ $slave->[$idx]->{'port'});
mtr_add_arg($args, "%s--report-user=root", $prefix);
mtr_add_arg($args, "%s--skip-innodb", $prefix);
mtr_add_arg($args, "%s--skip-ndbcluster", $prefix);
@@ -2716,7 +2694,7 @@ sub mysqld_arguments ($$$$$$) {
mtr_add_arg($args, "%s--slave-load-tmpdir=%s", $prefix,
"../tmp");
mtr_add_arg($args, "%s--socket=%s", $prefix,
- $slave->[$idx]->{'path_mysock'});
+ $slave->[$idx]->{'path_sock'});
mtr_add_arg($args, "%s--set-variable=slave_net_timeout=10", $prefix);
if ( @$slave_master_info )
@@ -2733,20 +2711,22 @@ sub mysqld_arguments ($$$$$$) {
mtr_add_arg($args, "%s--master-host=127.0.0.1", $prefix);
mtr_add_arg($args, "%s--master-password=", $prefix);
mtr_add_arg($args, "%s--master-port=%d", $prefix,
- $master->[0]->{'path_myport'}); # First master
+ $master->[0]->{'port'}); # First master
mtr_add_arg($args, "%s--server-id=%d", $prefix, $slave_server_id);
mtr_add_arg($args, "%s--rpl-recovery-rank=%d", $prefix, $slave_rpl_rank);
}
-
- if ( $opt_skip_ndbcluster_slave )
+
+ if ( $opt_skip_ndbcluster_slave ||
+ $slave->[$idx]->{'cluster'} == -1 ||
+ !$clusters->[$slave->[$idx]->{'cluster'}]->{'pid'} )
{
mtr_add_arg($args, "%s--skip-ndbcluster", $prefix);
}
- if ( $idx == 0 and $using_ndbcluster_slave )
+ else
{
mtr_add_arg($args, "%s--ndbcluster", $prefix);
mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
- $opt_ndbconnectstring_slave);
+ $clusters->[$slave->[$idx]->{'cluster'}]->{'connect_string'});
mtr_add_arg($args, "%s--ndb-extra-logging", $prefix);
}
} # end slave
@@ -2800,24 +2780,6 @@ sub mysqld_arguments ($$$$$$) {
mtr_add_arg($args, "%s--user=root", $prefix);
}
- if ( $type eq 'master' )
- {
-
- if ( ! $opt_old_master )
- {
- mtr_add_arg($args, "%s--rpl-recovery-rank=1", $prefix);
- mtr_add_arg($args, "%s--init-rpl-role=master", $prefix);
- }
-
- # FIXME strange,.....
- # FIXME MYSQL_MYPORT is not set anythere?!
- if ( $opt_local_master )
- {
- mtr_add_arg($args, "%s--host=127.0.0.1", $prefix);
- mtr_add_arg($args, "%s--port=%s", $prefix, $ENV{'MYSQL_MYPORT'});
- }
- }
-
foreach my $arg ( @opt_extra_mysqld_opt, @$extra_opt )
{
mtr_add_arg($args, "%s%s", $prefix, $arg);
@@ -2845,23 +2807,24 @@ sub mysqld_arguments ($$$$$$) {
#
##############################################################################
-sub mysqld_start ($$$$$) {
- my $type= shift; # master/slave/bootstrap
- my $idx= shift;
+sub mysqld_start ($$$) {
+ my $mysqld= shift;
my $extra_opt= shift;
my $slave_master_info= shift;
- my $using_ndbcluster= shift;
-
my $args; # Arg vector
my $exe;
my $pid= -1;
+ my $wait_for_pid_file= 1;
+
+ my $type= $mysqld->{'type'};
+ my $idx= $mysqld->{'idx'};
if ( $type eq 'master' )
{
$exe= $exe_master_mysqld;
}
- elsif ( $type eq 'slave' )
+ if ( $type eq 'slave' )
{
$exe= $exe_slave_mysqld;
}
@@ -2877,8 +2840,7 @@ sub mysqld_start ($$$$$) {
valgrind_arguments($args, \$exe);
}
- mysqld_arguments($args,$type,$idx,$extra_opt,$slave_master_info,
- $using_ndbcluster);
+ mysqld_arguments($args,$type,$idx,$extra_opt,$slave_master_info);
if ( $opt_gdb || $opt_manual_gdb)
{
@@ -2903,6 +2865,11 @@ sub mysqld_start ($$$$$) {
# Indicate the exe should not be started
$exe= undef;
}
+ else
+ {
+ # Default to not waiting until the pid file has been created
+ $wait_for_pid_file= 0;
+ }
if ($exe_libtool and $opt_valgrind)
{
@@ -2913,116 +2880,500 @@ sub mysqld_start ($$$$$) {
}
- if ( $type eq 'master' )
+ if ( defined $exe )
+ {
+ $pid= mtr_spawn($exe, $args, "",
+ $mysqld->{'path_myerr'},
+ $mysqld->{'path_myerr'},
+ "",
+ { append_log_file => 1 });
+ }
+
+
+ if ( $wait_for_pid_file && !sleep_until_file_created($mysqld->{'path_pid'},
+ $mysqld->{'start_timeout'},
+ $pid))
+ {
+
+ mtr_error("Failed to start mysqld $mysqld->{'type'}");
+ }
+
+
+ # Remember pid of the started process
+ $mysqld->{'pid'}= $pid;
+
+ # Remember options used when starting
+ $mysqld->{'start_opts'}= $extra_opt;
+
+ mtr_verbose("mysqld pid: $pid");
+ return $pid;
+}
+
+
+sub stop_all_servers () {
+
+ print "Stopping All Servers\n";
+
+ if ( $instance_manager->{'pid'} )
{
- if ( ! defined $exe or
- $pid= mtr_spawn($exe, $args, "",
- $master->[$idx]->{'path_myerr'},
- $master->[$idx]->{'path_myerr'},
- "",
- { append_log_file => 1 }) )
+ print "Shutting-down Instance Manager\n";
+ im_stop($instance_manager, "stop_all_servers");
+ }
+
+ my %admin_pids; # hash of admin processes that request shutdown
+ my @kill_pids; # list of processes to shutdown/kill
+ my $pid;
+
+ # Start shutdown of all started masters
+ foreach my $mysqld (@{$master}, @{$slave})
+ {
+ if ( $mysqld->{'pid'} )
{
- return sleep_until_file_created($master->[$idx]->{'path_mypid'},
- $master->[$idx]->{'start_timeout'},
- $pid);
+ $pid= mtr_mysqladmin_start($mysqld, "shutdown", 70);
+ $admin_pids{$pid}= 1;
+
+ push(@kill_pids,{
+ pid => $mysqld->{'pid'},
+ pidfile => $mysqld->{'path_pid'},
+ sockfile => $mysqld->{'path_sock'},
+ port => $mysqld->{'port'},
+ });
+
+ $mysqld->{'pid'}= 0; # Assume we are done with it
}
}
- if ( $type eq 'slave' )
+ # Start shutdown of clusters
+ foreach my $cluster (@{$clusters})
{
- if ( ! defined $exe or
- $pid= mtr_spawn($exe, $args, "",
- $slave->[$idx]->{'path_myerr'},
- $slave->[$idx]->{'path_myerr'},
- "",
- { append_log_file => 1 }) )
+ if ( $cluster->{'pid'} )
{
- return sleep_until_file_created($slave->[$idx]->{'path_mypid'},
- $master->[$idx]->{'start_timeout'},
- $pid);
+ $pid= mtr_ndbmgm_start($cluster, "shutdown");
+ $admin_pids{$pid}= 1;
+
+ push(@kill_pids,{
+ pid => $cluster->{'pid'},
+ pidfile => $cluster->{'path_pid'}
+ });
+
+ $cluster->{'pid'}= 0; # Assume we are done with it
+
+ foreach my $ndbd (@{$cluster->{'ndbds'}})
+ {
+ if ( $ndbd->{'pid'} )
+ {
+ push(@kill_pids,{
+ pid => $ndbd->{'pid'},
+ pidfile => $ndbd->{'path_pid'},
+ });
+ $ndbd->{'pid'}= 0;
+ }
+ }
}
}
- return 0;
+ # Wait blocking until all shutdown processes have completed
+ mtr_wait_blocking(\%admin_pids);
+
+ # Make sure the processes have shut down, else try to kill them
+ mtr_check_stop_servers(\@kill_pids);
+
+ foreach my $mysqld (@{$master}, @{$slave})
+ {
+ rm_ndbcluster_tables($mysqld->{'path_myddir'});
+ }
}
-sub stop_masters_slaves () {
+# ----------------------------------------------------------------------
+# If not using a running server we may need to stop and restart.
+# We restart in the case we have initialization scripts, server options
+# etc. to run. But we also restart again after a test that required a
+# restart has run, to get back to normal server settings.
+#
+# To make the code a bit cleaner, we actually only stop servers
+# here, and mark this to be done. Then a generic "start" part will
+# start up the needed servers again.
+# ----------------------------------------------------------------------
- print "Ending Tests\n";
+sub run_testcase_stop_servers($) {
+ my $tinfo= shift;
- if ( $instance_manager->{'pid'} )
+ if ( $glob_use_running_server || $glob_use_embedded_server )
{
- print "Shutting-down Instance Manager\n";
- im_stop($instance_manager);
+ return;
}
- print "Shutting-down MySQL daemon\n\n";
- stop_masters();
- print "Master(s) shutdown finished\n";
- stop_slaves();
- print "Slave(s) shutdown finished\n";
-}
+ # We try to find out if we are to restart the master(s)
+ my $do_restart= 0; # Assumes we don't have to
+ if ( $tinfo->{'master_sh'} )
+ {
+ $do_restart= 1; # Always restart if script to run
+ mtr_verbose("Restart because: Always restart if script to run");
+ }
+ elsif ( ! $opt_skip_ndbcluster and
+ $tinfo->{'ndb_test'} == 0 and
+ $clusters->[0]->{'pid'} != 0 )
+ {
+ $do_restart= 1; # Restart without cluster
+ mtr_verbose("Restart because: Test does not need cluster");
+ }
+ elsif ( ! $opt_skip_ndbcluster and
+ $tinfo->{'ndb_test'} == 1 and
+ $clusters->[0]->{'pid'} == 0 )
+ {
+ $do_restart= 1; # Restart with cluster
+ mtr_verbose("Restart because: Test need cluster");
+ }
+ elsif ( $master->[0]->{'running_master_is_special'} and
+ $master->[0]->{'running_master_is_special'}->{'timezone'} eq
+ $tinfo->{'timezone'} and
+ mtr_same_opts($master->[0]->{'running_master_is_special'}->{'master_opt'},
+ $tinfo->{'master_opt'}) )
+ {
+ # If running master was started with special settings, but
+ # the current test requires the same ones, we *don't* restart.
+ $do_restart= 0;
+ mtr_verbose("Skip restart: options are equal " .
+ join(" ", @{$tinfo->{'master_opt'}}));
+ }
+ elsif ( $tinfo->{'master_restart'} )
+ {
+ $do_restart= 1;
+ mtr_verbose("Restart because: master_restart");
+ }
+ elsif ( $master->[0]->{'running_master_is_special'} )
+ {
+ $do_restart= 1;
+ mtr_verbose("Restart because: running_master_is_special");
+ }
+ # Check that running master was started with same options
+ # as the current test requires
+ elsif (! mtr_same_opts($master->[0]->{'start_opts'},
+ $tinfo->{'master_opt'}) )
+ {
+ $do_restart= 1;
+ mtr_verbose("Restart because: running with different options '" .
+ join(" ", @{$tinfo->{'master_opt'}}) . "' != '" .
+ join(" ", @{$master->[0]->{'start_opts'}}) . "'" );
+ }
-sub stop_masters () {
+ my $pid;
+ my %admin_pids; # hash of admin processes that request shutdown
+ my @kill_pids; # list of processes to shutdown/kill
- my @args;
- for ( my $idx; $idx < 2; $idx++ )
+ # Remember if we restarted for this test case
+ $tinfo->{'restarted'}= $do_restart;
+
+ if ( $do_restart )
{
- # FIXME if we hit ^C before fully started, this test will prevent
- # the mysqld process from being killed
- if ( $master->[$idx]->{'pid'} )
+ delete $master->[0]->{'running_master_is_special'}; # Forget history
+
+ # Start shutdown of all started masters
+ foreach my $mysqld (@{$master})
{
- push(@args,{
- pid => $master->[$idx]->{'pid'},
- pidfile => $master->[$idx]->{'path_mypid'},
- sockfile => $master->[$idx]->{'path_mysock'},
- port => $master->[$idx]->{'path_myport'},
- });
- $master->[$idx]->{'pid'}= 0; # Assume we are done with it
+ if ( $mysqld->{'pid'} )
+ {
+ $pid= mtr_mysqladmin_start($mysqld, "shutdown", 70);
+
+ $admin_pids{$pid}= 1;
+
+ push(@kill_pids,{
+ pid => $mysqld->{'pid'},
+ pidfile => $mysqld->{'path_pid'},
+ sockfile => $mysqld->{'path_sock'},
+ port => $mysqld->{'port'},
+ });
+
+ $mysqld->{'pid'}= 0; # Assume we are done with it
+ }
+ }
+
+ # Start shutdown of master cluster
+ my $cluster= $clusters->[0];
+ if ( $cluster->{'pid'} )
+ {
+ $pid= mtr_ndbmgm_start($cluster, "shutdown");
+ $admin_pids{$pid}= 1;
+
+ push(@kill_pids,{
+ pid => $cluster->{'pid'},
+ pidfile => $cluster->{'path_pid'}
+ });
+
+ $cluster->{'pid'}= 0; # Assume we are done with it
+
+ foreach my $ndbd (@{$cluster->{'ndbds'}})
+ {
+ push(@kill_pids,{
+ pid => $ndbd->{'pid'},
+ pidfile => $ndbd->{'path_pid'},
+ });
+ $ndbd->{'pid'}= 0; # Assume we are done with it
+ }
}
}
- if ( ! $master->[0]->{'ndbcluster'} )
- {
- ndbcluster_stop();
- $master->[0]->{'ndbcluster'}= 1;
+ # We try to find out if we are to restart the slaves
+ my $do_slave_restart= 0; # Assumes we don't have to
+
+ # FIXME only restart when necessary
+ $do_slave_restart= 1;
+
+# if ( ! $slave->[0]->{'pid'} )
+# {
+# # mtr_verbose("Slave not started, no need to check slave restart");
+# }
+# elsif ( $do_restart )
+# {
+# $do_slave_restart= 1; # Always restart if master restart
+# mtr_verbose("Restart slave because: Master restart");
+# }
+# elsif ( $tinfo->{'slave_sh'} )
+# {
+# $do_slave_restart= 1; # Always restart if script to run
+# mtr_verbose("Restart slave because: Always restart if script to run");
+# }
+# elsif ( ! $opt_skip_ndbcluster_slave and
+# $tinfo->{'ndb_test'} == 0 and
+# $clusters->[1]->{'pid'} != 0 )
+# {
+# $do_slave_restart= 1; # Restart without slave cluster
+# mtr_verbose("Restart slave because: Test does not need slave cluster");
+# }
+# elsif ( ! $opt_with_ndbcluster_slave and
+# $tinfo->{'ndb_test'} == 1 and
+# $clusters->[1]->{'pid'} == 0 )
+# {
+# $do_slave_restart= 1; # Restart with slave cluster
+# mtr_verbose("Restart slave because: Test need slave cluster");
+# }
+# elsif ( $tinfo->{'slave_restart'} )
+# {
+# $do_slave_restart= 1;
+# mtr_verbose("Restart slave because: slave_restart");
+# }
+# elsif ( $slave->[0]->{'running_slave_is_special'} )
+# {
+# $do_slave_restart= 1;
+# mtr_verbose("Restart slave because: running_slave_is_special");
+# }
+# # Check that running slave was started with same options
+# # as the current test requires
+# elsif (! mtr_same_opts($slave->[0]->{'start_opts'},
+# $tinfo->{'slave_opt'}) )
+# {
+# $do_slave_restart= 1;
+# mtr_verbose("Restart slave because: running with different options '" .
+# join(" ", @{$tinfo->{'slave_opt'}}) . "' != '" .
+# join(" ", @{$slave->[0]->{'start_opts'}}) . "'" );
+# }
+
+ if ( $do_slave_restart )
+ {
+
+ delete $slave->[0]->{'running_slave_is_special'}; # Forget history
+
+ # Start shutdown of all started slaves
+ foreach my $mysqld (@{$slave})
+ {
+ if ( $mysqld->{'pid'} )
+ {
+ $pid= mtr_mysqladmin_start($mysqld, "shutdown", 70);
+
+ $admin_pids{$pid}= 1;
+
+ push(@kill_pids,{
+ pid => $mysqld->{'pid'},
+ pidfile => $mysqld->{'path_pid'},
+ sockfile => $mysqld->{'path_sock'},
+ port => $mysqld->{'port'},
+ });
+
+
+ $mysqld->{'pid'}= 0; # Assume we are done with it
+ }
+ }
+
+ # Start shutdown of slave cluster
+ my $cluster= $clusters->[1];
+ if ( $cluster->{'pid'} )
+ {
+ $pid= mtr_ndbmgm_start($cluster, "shutdown");
+
+ $admin_pids{$pid}= 1;
+
+ push(@kill_pids,{
+ pid => $cluster->{'pid'},
+ pidfile => $cluster->{'path_pid'}
+ });
+
+ $cluster->{'pid'}= 0; # Assume we are done with it
+
+ foreach my $ndbd (@{$cluster->{'ndbds'}} )
+ {
+ push(@kill_pids,{
+ pid => $ndbd->{'pid'},
+ pidfile => $ndbd->{'path_pid'},
+ });
+ $ndbd->{'pid'}= 0; # Assume we are done with it
+ }
+ }
}
- mtr_stop_mysqld_servers(\@args);
+ # ----------------------------------------------------------------------
+ # Shutdown has now been started, and the lists of shutdown processes
+ # and processes to be killed have been created
+ # ----------------------------------------------------------------------
+
+ # Wait blocking until all shutdown processes have completed
+ mtr_wait_blocking(\%admin_pids);
+
+
+ # Make sure the processes have shut down, else try to kill them
+ mtr_check_stop_servers(\@kill_pids);
+
+ foreach my $mysqld (@{$master}, @{$slave})
+ {
+ if ( ! $mysqld->{'pid'} )
+ {
+ # Remove ndbcluster tables if server is stopped
+ rm_ndbcluster_tables($mysqld->{'path_myddir'});
+ }
+ }
}
+sub run_testcase_start_servers($) {
+ my $tinfo= shift;
-sub stop_slaves () {
- my $force= shift;
+ my $tname= $tinfo->{'name'};
- my @args;
+ if ( $glob_use_running_server or $glob_use_embedded_server )
+ {
+ return;
+ }
- for ( my $idx; $idx < 3; $idx++ )
+ if ( $tinfo->{'component_id'} eq 'mysqld' )
{
- if ( $slave->[$idx]->{'pid'} )
+ if ( ! $opt_skip_ndbcluster and
+ !$clusters->[0]->{'pid'} and
+ $tinfo->{'ndb_test'} )
{
- push(@args,{
- pid => $slave->[$idx]->{'pid'},
- pidfile => $slave->[$idx]->{'path_mypid'},
- sockfile => $slave->[$idx]->{'path_mysock'},
- port => $slave->[$idx]->{'path_myport'},
- });
- $slave->[$idx]->{'pid'}= 0; # Assume we are done with it
+ # Test needs cluster and it is not started, start it
+ ndbcluster_start($clusters->[0], "");
+ }
+
+ if ( !$master->[0]->{'pid'} )
+ {
+ # Master mysqld is not started
+ do_before_start_master($tname,$tinfo->{'master_sh'});
+
+ mysqld_start($master->[0],$tinfo->{'master_opt'},[]);
+
+ }
+
+ if ( $clusters->[0]->{'pid'} and ! $master->[1]->{'pid'} )
+ {
+ # Test needs cluster, start an extra mysqld connected to cluster
+
+ # First wait for the first mysqld server to have created the ndb system tables
+ # FIXME This is a workaround so that only one mysqld creates the tables
+ if ( ! sleep_until_file_created(
+ "$master->[0]->{'path_myddir'}/cluster/apply_status.ndb",
+ $master->[0]->{'start_timeout'},
+ $master->[0]->{'pid'}))
+ {
+ mtr_report("Failed to create 'cluster/apply_status' table");
+ report_failure_and_restart($tinfo);
+ return;
+ }
+ mtr_tofile($master->[1]->{'path_myerr'},"CURRENT_TEST: $tname\n");
+
+ mysqld_start($master->[1],$tinfo->{'master_opt'},[]);
+ }
+
+ if ( $tinfo->{'master_restart'} )
+ {
+ # Save this test case information, so next can examine it
+ $master->[0]->{'running_master_is_special'}= $tinfo;
}
}
+ elsif ( ! $opt_skip_im and $tinfo->{'component_id'} eq 'im' )
+ {
+ # We have to create the defaults file every time, in order to ensure that
+ # it is the same for each test. The problem is that a test can change the
+ # file (by SET/UNSET commands), so without recreating the file, the
+ # execution of one test could affect the next.
+
+ im_create_defaults_file($instance_manager);
- if ( ! $slave->[0]->{'ndbcluster'} )
+ im_start($instance_manager, $tinfo->{im_opts});
+ }
+
+ # ----------------------------------------------------------------------
+ # Start slaves - if needed
+ # ----------------------------------------------------------------------
+ if ( $tinfo->{'slave_num'} )
{
- ndbcluster_stop_slave();
- $slave->[0]->{'ndbcluster'}= 1;
+ mtr_tofile($slave->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n");
+
+ do_before_start_slave($tname,$tinfo->{'slave_sh'});
+
+ if ( ! $opt_skip_ndbcluster_slave and
+ !$clusters->[1]->{'pid'} and
+ $tinfo->{'ndb_test'} )
+ {
+ # Test needs slave cluster and it is not started, start it
+ ndbcluster_start($clusters->[1], "");
+ }
+
+ for ( my $idx= 0; $idx < $tinfo->{'slave_num'}; $idx++ )
+ {
+ if ( ! $slave->[$idx]->{'pid'} )
+ {
+ mysqld_start($slave->[$idx],$tinfo->{'slave_opt'},
+ $tinfo->{'slave_mi'});
+
+ }
+ }
+
+ if ( $tinfo->{'slave_restart'} )
+ {
+ # Save this test case information, so next can examine it
+ $slave->[0]->{'running_slave_is_special'}= $tinfo;
+ }
+
}
- mtr_stop_mysqld_servers(\@args);
+ # Wait for clusters to start
+ foreach my $cluster (@{$clusters})
+ {
+
+ next if !$cluster->{'pid'};
+
+ if (ndbcluster_wait_started($cluster, ""))
+ {
+ # Failed to start
+ mtr_report("Start of $cluster->{'name'} cluster failed");
+ }
+ }
+
+ # Wait for the mysqld servers to start
+ foreach my $mysqld (@{$master},@{$slave})
+ {
+
+ next if !$mysqld->{'pid'};
+
+ if (mysqld_wait_started($mysqld))
+ {
+ mtr_warning("Failed to start $mysqld->{'type'} mysqld $mysqld->{'idx'}");
+ }
+ }
}
+
##############################################################################
#
# Instance Manager management routines.
@@ -3079,13 +3430,15 @@ sub im_start($$) {
return;
}
- $instance_manager->{'pid'} =
- mtr_get_pid_from_file($instance_manager->{'path_pid'});
+ my $pid= mtr_get_pid_from_file($instance_manager->{'path_pid'});
+ $instance_manager->{'pid'} = $pid;
+ mtr_verbose("im_start: pid: $pid");
}
-sub im_stop($) {
+sub im_stop($$) {
my $instance_manager = shift;
+ my $where = shift;
# Obtain mysqld-process pids before we start stopping IM (it can delete pid
# files).
@@ -3122,25 +3475,23 @@ sub im_stop($) {
# Try graceful shutdown.
- mtr_debug("IM-main pid: $instance_manager->{'pid'}");
- mtr_debug("Stopping IM-main...");
-
- mtr_kill_process($instance_manager->{'pid'}, 'TERM', 10, 1);
+ mtr_verbose("Stopping IM-main, pid: $instance_manager->{'pid'}");
+ mtr_kill_process($instance_manager->{'pid'}, 'TERM', 10);
# If necessary, wait for angel process to die.
- if (defined $instance_manager->{'angel_pid'})
+ my $pid= $instance_manager->{'angel_pid'};
+ if (defined $pid)
{
- mtr_debug("IM-angel pid: $instance_manager->{'angel_pid'}");
- mtr_debug("Waiting for IM-angel to die...");
+ mtr_verbose("Waiting for IM-angel to die, pid: $pid");
my $total_attempts= 10;
for (my $cur_attempt=1; $cur_attempt <= $total_attempts; ++$cur_attempt)
{
- unless (kill (0, $instance_manager->{'angel_pid'}))
+ unless (kill (0, $pid))
{
- mtr_debug("IM-angel died.");
+ mtr_verbose("IM-angel died.");
last;
}
@@ -3148,57 +3499,49 @@ sub im_stop($) {
}
}
- # Check that all processes died.
-
- my $clean_shutdown= 0;
+ # Check if all processes shutdown cleanly
+ my $clean_shutdown= 1; # Assume they did
- while (1)
+ if (kill (0, $instance_manager->{'pid'}))
{
- if (kill (0, $instance_manager->{'pid'}))
- {
- mtr_debug("IM-main is still alive.");
- last;
- }
+ mtr_warning("IM-main is still alive.");
+ $clean_shutdown= 0;
+ }
- if (defined $instance_manager->{'angel_pid'} &&
- kill (0, $instance_manager->{'angel_pid'}))
- {
- mtr_debug("IM-angel is still alive.");
- last;
- }
+ if (defined $instance_manager->{'angel_pid'} &&
+ kill (0, $instance_manager->{'angel_pid'}))
+ {
+ mtr_warning("IM-angel is still alive.");
+ $clean_shutdown= 0;
+ }
- foreach my $pid (@mysqld_pids)
+ foreach my $pid (@mysqld_pids)
+ {
+ if (kill (0, $pid))
{
- if (kill (0, $pid))
- {
- mtr_debug("Guarded mysqld ($pid) is still alive.");
- last;
- }
+ mtr_warning("Guarded mysqld ($pid) is still alive.");
+ $clean_shutdown= 0;
}
-
- $clean_shutdown= 1;
- last;
}
# Kill leftovers (the order is important).
-
unless ($clean_shutdown)
{
if (defined $instance_manager->{'angel_pid'})
{
- mtr_debug("Killing IM-angel...");
- mtr_kill_process($instance_manager->{'angel_pid'}, 'KILL', 10, 1)
+ mtr_verbose("Killing IM-angel, pid: $instance_manager->{'angel_pid'}");
+ mtr_kill_process($instance_manager->{'angel_pid'}, 'KILL', 10)
}
-
- mtr_debug("Killing IM-main...");
- mtr_kill_process($instance_manager->{'pid'}, 'KILL', 10, 1);
+
+ mtr_verbose("Killing IM-main, pid: $instance_manager->{'pid'}");
+ mtr_kill_process($instance_manager->{'pid'}, 'KILL', 10);
# Shutdown managed mysqld-processes. Some of them may be nonguarded, so IM
# will not stop them on shutdown. So, we should firstly try to end them
# legally.
- mtr_debug("Killing guarded mysqld(s)...");
+ mtr_verbose("Killing guarded mysqld(s) " . join(" ", @mysqld_pids));
mtr_kill_processes(\@mysqld_pids);
# Complain in error log so that a warning will be shown.
@@ -3210,7 +3553,7 @@ sub im_stop($) {
my $ts= localtime();
print ERRLOG
- "Warning: [$ts] Instance Manager did not shutdown gracefully.\n";
+ "[$where] Warning: [$ts] Instance Manager did not shutdown gracefully.\n";
close ERRLOG;
}
@@ -3229,9 +3572,12 @@ sub im_stop($) {
# Before a testcase, run in record mode, save result file to var
# After testcase, run and compare with the recorded file, they should be equal!
#
-sub run_check_testcase ($) {
+sub run_check_testcase ($$) {
my $mode= shift;
+ my $mysqld= shift;
+
+ my $name= "check-" . $mysqld->{'type'} . $mysqld->{'idx'};
my $args;
mtr_init_args(\$args);
@@ -3242,14 +3588,14 @@ sub run_check_testcase ($) {
mtr_add_arg($args, "--skip-safemalloc");
mtr_add_arg($args, "--tmpdir=%s", $opt_tmpdir);
- mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_mysock'});
- mtr_add_arg($args, "--port=%d", $master->[0]->{'path_myport'});
+ mtr_add_arg($args, "--socket=%s", $mysqld->{'path_sock'});
+ mtr_add_arg($args, "--port=%d", $mysqld->{'port'});
mtr_add_arg($args, "--database=test");
mtr_add_arg($args, "--user=%s", $opt_user);
mtr_add_arg($args, "--password=");
mtr_add_arg($args, "-R");
- mtr_add_arg($args, "$opt_vardir/tmp/check-testcase.result");
+ mtr_add_arg($args, "$opt_vardir/tmp/$name.result");
if ( $mode eq "before" )
{
@@ -3262,8 +3608,8 @@ sub run_check_testcase ($) {
if ( $res == 1 and $mode = "after")
{
mtr_run("diff",["-u",
- "$opt_vardir/tmp/check-testcase.result",
- "$opt_vardir/tmp/check-testcase.reject"],
+ "$opt_vardir/tmp/$name.result",
+ "$opt_vardir/tmp/$name.reject"],
"", "", "", "");
}
elsif ( $res )
@@ -3273,29 +3619,34 @@ sub run_check_testcase ($) {
}
+sub generate_cmdline_mysqldump ($) {
+ my($info) = @_;
+ return
+ "$exe_mysqldump --no-defaults -uroot " .
+ "--port=$info->[0]->{'port'} " .
+ "--socket=$info->[0]->{'path_sock'} --password=";
+}
+
sub run_mysqltest ($) {
my $tinfo= shift;
-
my $cmdline_mysqlcheck= "$exe_mysqlcheck --no-defaults -uroot " .
- "--port=$master->[0]->{'path_myport'} " .
- "--socket=$master->[0]->{'path_mysock'} --password=";
+ "--port=$master->[0]->{'port'} " .
+ "--socket=$master->[0]->{'path_sock'} --password=";
if ( $opt_debug )
{
$cmdline_mysqlcheck .=
" --debug=d:t:A,$opt_vardir_trace/log/mysqlcheck.trace";
}
- my $cmdline_mysqldump= "$exe_mysqldump --no-defaults -uroot " .
- "--port=$master->[0]->{'path_myport'} " .
- "--socket=$master->[0]->{'path_mysock'} --password=";
-
- my $cmdline_mysqldumpslave= "$exe_mysqldump --no-defaults -uroot " .
- "--socket=$slave->[0]->{'path_mysock'} --password=";
+ my $cmdline_mysqldump= generate_cmdline_mysqldump $master;
+ my $cmdline_mysqldumpslave= generate_cmdline_mysqldump $slave;
if ( $opt_debug )
{
$cmdline_mysqldump .=
- " --debug=d:t:A,$opt_vardir_trace/log/mysqldump.trace";
+ " --debug=d:t:A,$opt_vardir_trace/log/mysqldump-master.trace";
+ $cmdline_mysqldumpslave .=
+ " --debug=d:t:A,$opt_vardir_trace/log/mysqldump-slave.trace";
}
my $cmdline_mysqlslap;
@@ -3303,8 +3654,8 @@ sub run_mysqltest ($) {
unless ( $glob_win32 )
{
$cmdline_mysqlslap= "$exe_mysqlslap -uroot " .
- "--port=$master->[0]->{'path_myport'} " .
- "--socket=$master->[0]->{'path_mysock'} --password= " .
+ "--port=$master->[0]->{'port'} " .
+ "--socket=$master->[0]->{'path_sock'} --password= " .
"--lock-directory=$opt_tmpdir";
if ( $opt_debug )
{
@@ -3314,8 +3665,8 @@ sub run_mysqltest ($) {
}
my $cmdline_mysqlimport= "$exe_mysqlimport -uroot " .
- "--port=$master->[0]->{'path_myport'} " .
- "--socket=$master->[0]->{'path_mysock'} --password=";
+ "--port=$master->[0]->{'port'} " .
+ "--socket=$master->[0]->{'path_sock'} --password=";
if ( $opt_debug )
{
$cmdline_mysqlimport .=
@@ -3323,8 +3674,8 @@ sub run_mysqltest ($) {
}
my $cmdline_mysqlshow= "$exe_mysqlshow -uroot " .
- "--port=$master->[0]->{'path_myport'} " .
- "--socket=$master->[0]->{'path_mysock'} --password=";
+ "--port=$master->[0]->{'port'} " .
+ "--socket=$master->[0]->{'path_sock'} --password=";
if ( $opt_debug )
{
$cmdline_mysqlshow .=
@@ -3344,13 +3695,20 @@ sub run_mysqltest ($) {
my $cmdline_mysql=
"$exe_mysql --no-defaults --host=localhost --user=root --password= " .
- "--port=$master->[0]->{'path_myport'} " .
- "--socket=$master->[0]->{'path_mysock'}";
+ "--port=$master->[0]->{'port'} " .
+ "--socket=$master->[0]->{'path_sock'}";
my $cmdline_mysql_client_test=
"$exe_mysql_client_test --no-defaults --testcase --user=root --silent " .
- "--port=$master->[0]->{'path_myport'} " .
- "--socket=$master->[0]->{'path_mysock'}";
+ "--port=$master->[0]->{'port'} " .
+ "--vardir=$opt_vardir " .
+ "--socket=$master->[0]->{'path_sock'}";
+
+ if ( $opt_debug )
+ {
+ $cmdline_mysql_client_test .=
+ " --debug=d:t:A,$opt_vardir_trace/log/mysql_client_test.trace";
+ }
if ( $glob_use_embedded_server )
{
@@ -3363,8 +3721,8 @@ sub run_mysqltest ($) {
my $cmdline_mysql_fix_system_tables=
"$exe_mysql_fix_system_tables --no-defaults --host=localhost --user=root --password= " .
"--basedir=$glob_basedir --bindir=$path_client_bindir --verbose " .
- "--port=$master->[0]->{'path_myport'} " .
- "--socket=$master->[0]->{'path_mysock'}";
+ "--port=$master->[0]->{'port'} " .
+ "--socket=$master->[0]->{'path_sock'}";
$ENV{'MYSQL'}= $cmdline_mysql;
$ENV{'MYSQL_CHECK'}= $cmdline_mysqlcheck;
@@ -3381,12 +3739,12 @@ sub run_mysqltest ($) {
$ENV{'UDF_EXAMPLE_LIB'}=
($lib_udf_example ? basename($lib_udf_example) : "");
- $ENV{'NDB_STATUS_OK'}= $flag_ndb_status_ok ? "YES" : "NO";
- $ENV{'NDB_SLAVE_STATUS_OK'}= $flag_ndb_slave_status_ok ? "YES" : "NO";
+ $ENV{'NDB_STATUS_OK'}= $clusters->[0]->{'installed_ok'};
+ $ENV{'NDB_SLAVE_STATUS_OK'}= $clusters->[0]->{'installed_ok'};
$ENV{'NDB_EXTRA_TEST'}= $opt_ndb_extra_test;
$ENV{'NDB_MGM'}= $exe_ndb_mgm;
- $ENV{'NDB_BACKUP_DIR'}= $path_ndb_data_dir;
- $ENV{'NDB_DATA_DIR'}= $path_ndb_data_dir;
+ $ENV{'NDB_BACKUP_DIR'}= $clusters->[0]->{'data_dir'};
+ $ENV{'NDB_DATA_DIR'}= $clusters->[0]->{'data_dir'};
$ENV{'NDB_TOOLS_DIR'}= $path_ndb_tools_dir;
$ENV{'NDB_TOOLS_OUTPUT'}= $file_ndb_testrun_log;
$ENV{'NDB_CONNECTSTRING'}= $opt_ndbconnectstring;
@@ -3411,8 +3769,8 @@ sub run_mysqltest ($) {
}
else # component_id == mysqld
{
- mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_mysock'});
- mtr_add_arg($args, "--port=%d", $master->[0]->{'path_myport'});
+ mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_sock'});
+ mtr_add_arg($args, "--port=%d", $master->[0]->{'port'});
mtr_add_arg($args, "--database=test");
mtr_add_arg($args, "--user=%s", $opt_user);
mtr_add_arg($args, "--password=");
@@ -3499,7 +3857,7 @@ sub run_mysqltest ($) {
if ( $glob_use_embedded_server )
{
- mysqld_arguments($args,'master',0,$tinfo->{'master_opt'},[],0);
+ mysqld_arguments($args,'master',0,$tinfo->{'master_opt'},[]);
}
# ----------------------------------------------------------------------
@@ -3556,14 +3914,26 @@ sub run_mysqltest ($) {
if ( $opt_check_testcases )
{
- run_check_testcase("before");
+ foreach my $mysqld (@{$master}, @{$slave})
+ {
+ if ($mysqld->{'pid'})
+ {
+ run_check_testcase("before", $mysqld);
+ }
+ }
}
my $res = mtr_run_test($exe,$args,"","",$path_timefile,"");
if ( $opt_check_testcases )
{
- run_check_testcase("after");
+ foreach my $mysqld (@{$master}, @{$slave})
+ {
+ if ($mysqld->{'pid'})
+ {
+ run_check_testcase("after", $mysqld);
+ }
+ }
}
return $res;
@@ -3742,12 +4112,20 @@ sub valgrind_arguments {
my $args= shift;
my $exe= shift;
- mtr_add_arg($args, "--tool=memcheck"); # From >= 2.1.2 needs this option
- mtr_add_arg($args, "--alignment=8");
- mtr_add_arg($args, "--leak-check=yes");
- mtr_add_arg($args, "--num-callers=16");
- mtr_add_arg($args, "--suppressions=%s/valgrind.supp", $glob_mysql_test_dir)
- if -f "$glob_mysql_test_dir/valgrind.supp";
+ if ( $opt_callgrind)
+ {
+ mtr_add_arg($args, "--tool=callgrind");
+ mtr_add_arg($args, "--base=$opt_vardir/log");
+ }
+ else
+ {
+ mtr_add_arg($args, "--tool=memcheck"); # From >= 2.1.2 needs this option
+ mtr_add_arg($args, "--alignment=8");
+ mtr_add_arg($args, "--leak-check=yes");
+ mtr_add_arg($args, "--num-callers=16");
+ mtr_add_arg($args, "--suppressions=%s/valgrind.supp", $glob_mysql_test_dir)
+ if -f "$glob_mysql_test_dir/valgrind.supp";
+ }
# Add valgrind options, can be overriden by user
mtr_add_arg($args, '%s', $_) for (split(' ', $opt_valgrind_options));
@@ -3804,10 +4182,11 @@ Options to control directories to use
Options to control what test suites or cases to run
force Continue to run the suite after failure
- with-ndbcluster Use cluster, and enable test cases that requires it
- with-ndbcluster-all Use cluster in all tests
+ with-ndbcluster Use cluster in all tests
with-ndbcluster-only Run only tests that include "ndb" in the filename
- skip-ndb[cluster] Skip the ndb test cases, don't start cluster
+ skip-ndb[cluster] Skip all tests that need cluster
+ skip-ndb[cluster]-slave Skip all tests that need a slave cluster
+ ndb-extra Run extra tests from ndb directory
do-test=PREFIX Run test cases which name are prefixed with PREFIX
start-from=PREFIX Run test cases starting from test prefixed with PREFIX
suite=NAME Run the test suite named NAME. The default is "main"
@@ -3837,7 +4216,8 @@ Options that pass on options
Options to run test on running server
extern Use running server for tests FIXME DANGEROUS
- ndbconnectstring=STR Use running cluster, and connect using STR
+ ndb-connectstring=STR Use running cluster, and connect using STR
+ ndb-connectstring-slave=STR Use running slave cluster, and connect using STR
user=USER User for connect to server
Options for debugging the product
@@ -3868,12 +4248,14 @@ Options for coverage, profiling etc
valgrind-mysqld Run the "mysqld" executable with valgrind
valgrind-options=ARGS Options to give valgrind, replaces default options
valgrind-path=[EXE] Path to the valgrind executable
+ callgrind Instruct valgrind to use callgrind
Misc options
comment=STR Write STR to the output
notimer Don't show test case execution time
script-debug Debug this script itself
+ verbose More verbose output
start-and-exit Only initialize and start the servers, using the
startup settings for the specified test case (if any)
start-dirty Only start the servers (without initialization) for
@@ -3886,15 +4268,14 @@ Misc options
testcase-timeout=MINUTES Max test case run time (default $default_testcase_timeout)
suite-timeout=MINUTES Max test suite run time (default $default_suite_timeout)
+
Deprecated options
with-openssl Deprecated option for ssl
Options not yet described, or that I want to look into more
local
- local-master
netware
- old-master
sleep=SECONDS
socket=PATH
user-test=s
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index 4cbb1bece9c..b74965b706e 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -2178,12 +2178,7 @@ then
USE_NDBCLUSTER_OPT=
fi
-# Do not automagically start daemons if we are in gdb or running only one
-# test case
- if [ -z "$DO_GDB" ] && [ -z "$DO_DDD" ]
- then
- mysql_start
- fi
+ mysql_start
$ECHO "Loading Standard Test Databases"
mysql_loadstd
fi
diff --git a/mysql-test/r/analyse.result b/mysql-test/r/analyse.result
index 3bb8e30fc0d..ba56a98b4a9 100644
--- a/mysql-test/r/analyse.result
+++ b/mysql-test/r/analyse.result
@@ -39,10 +39,10 @@ t2 CREATE TABLE `t2` (
`Field_name` varbinary(255) NOT NULL DEFAULT '',
`Min_value` varbinary(255) DEFAULT NULL,
`Max_value` varbinary(255) DEFAULT NULL,
- `Min_length` bigint(11) NOT NULL DEFAULT '0',
- `Max_length` bigint(11) NOT NULL DEFAULT '0',
- `Empties_or_zeros` bigint(11) NOT NULL DEFAULT '0',
- `Nulls` bigint(11) NOT NULL DEFAULT '0',
+ `Min_length` int(11) NOT NULL DEFAULT '0',
+ `Max_length` int(11) NOT NULL DEFAULT '0',
+ `Empties_or_zeros` int(11) NOT NULL DEFAULT '0',
+ `Nulls` int(11) NOT NULL DEFAULT '0',
`Avg_value_or_avg_length` varbinary(255) NOT NULL DEFAULT '',
`Std` varbinary(255) DEFAULT NULL,
`Optimal_fieldtype` varbinary(64) NOT NULL DEFAULT ''
@@ -58,10 +58,10 @@ t2 CREATE TABLE `t2` (
`Field_name` varbinary(255) NOT NULL DEFAULT '',
`Min_value` varbinary(255) DEFAULT NULL,
`Max_value` varbinary(255) DEFAULT NULL,
- `Min_length` bigint(11) NOT NULL DEFAULT '0',
- `Max_length` bigint(11) NOT NULL DEFAULT '0',
- `Empties_or_zeros` bigint(11) NOT NULL DEFAULT '0',
- `Nulls` bigint(11) NOT NULL DEFAULT '0',
+ `Min_length` int(11) NOT NULL DEFAULT '0',
+ `Max_length` int(11) NOT NULL DEFAULT '0',
+ `Empties_or_zeros` int(11) NOT NULL DEFAULT '0',
+ `Nulls` int(11) NOT NULL DEFAULT '0',
`Avg_value_or_avg_length` varbinary(255) NOT NULL DEFAULT '',
`Std` varbinary(255) DEFAULT NULL,
`Optimal_fieldtype` varbinary(64) NOT NULL DEFAULT ''
@@ -81,10 +81,10 @@ t2 CREATE TABLE `t2` (
`Field_name` varbinary(255) NOT NULL DEFAULT '',
`Min_value` varbinary(255) DEFAULT NULL,
`Max_value` varbinary(255) DEFAULT NULL,
- `Min_length` bigint(11) NOT NULL DEFAULT '0',
- `Max_length` bigint(11) NOT NULL DEFAULT '0',
- `Empties_or_zeros` bigint(11) NOT NULL DEFAULT '0',
- `Nulls` bigint(11) NOT NULL DEFAULT '0',
+ `Min_length` int(11) NOT NULL DEFAULT '0',
+ `Max_length` int(11) NOT NULL DEFAULT '0',
+ `Empties_or_zeros` int(11) NOT NULL DEFAULT '0',
+ `Nulls` int(11) NOT NULL DEFAULT '0',
`Avg_value_or_avg_length` varbinary(255) NOT NULL DEFAULT '',
`Std` varbinary(255) DEFAULT NULL,
`Optimal_fieldtype` varbinary(64) NOT NULL DEFAULT ''
diff --git a/mysql-test/r/archive.result b/mysql-test/r/archive.result
index cacf4aaf304..1dfec8ff713 100644
--- a/mysql-test/r/archive.result
+++ b/mysql-test/r/archive.result
@@ -13812,6 +13812,8 @@ select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn');
i v
4 3r4f
alter table t1 data directory="$MYSQLTEST_VARDIR/tmp";
+Warnings:
+Warning 0 DATA DIRECTORY option ignored
select * from t1;
i v
1 def
diff --git a/mysql-test/r/auto_increment.result b/mysql-test/r/auto_increment.result
index d9f4f98156c..1f13264ee55 100644
--- a/mysql-test/r/auto_increment.result
+++ b/mysql-test/r/auto_increment.result
@@ -143,7 +143,7 @@ explain extended select last_insert_id();
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache last_insert_id() AS `last_insert_id()`
+Note 1003 select last_insert_id() AS `last_insert_id()`
insert into t1 set i = 254;
ERROR 23000: Duplicate entry '254' for key 'PRIMARY'
select last_insert_id();
@@ -153,7 +153,7 @@ insert into t1 set i = null;
ERROR 23000: Duplicate entry '255' for key 'PRIMARY'
select last_insert_id();
last_insert_id()
-0
+255
drop table t1;
create table t1 (i tinyint unsigned not null auto_increment, key (i));
insert into t1 set i = 254;
@@ -181,7 +181,7 @@ insert into t1 values (NULL, 10);
ERROR 23000: Duplicate entry '10' for key 'b'
select last_insert_id();
last_insert_id()
-0
+2
drop table t1;
create table t1(a int auto_increment,b int null,primary key(a));
SET SQL_MODE=NO_AUTO_VALUE_ON_ZERO;
@@ -446,3 +446,57 @@ INSERT INTO t1 VALUES(1, 1);
ALTER TABLE t1 CHANGE t1 t1 INT(10) auto_increment;
ERROR 23000: ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '1' for key 'PRIMARY'
DROP TABLE t1;
+CREATE TABLE `t2` (
+`k` int(11) NOT NULL auto_increment,
+`a` int(11) default NULL,
+`c` int(11) default NULL,
+PRIMARY KEY (`k`),
+UNIQUE KEY `idx_1` (`a`)
+) ENGINE=InnoDB;
+insert into t2 ( a ) values ( 6 ) on duplicate key update c =
+ifnull( c,
+0 ) + 1;
+insert into t2 ( a ) values ( 7 ) on duplicate key update c =
+ifnull( c,
+0 ) + 1;
+select last_insert_id();
+last_insert_id()
+2
+select * from t2;
+k a c
+1 6 NULL
+2 7 NULL
+insert into t2 ( a ) values ( 6 ) on duplicate key update c =
+ifnull( c,
+0 ) + 1;
+select last_insert_id();
+last_insert_id()
+1
+select * from t2;
+k a c
+1 6 1
+2 7 NULL
+insert ignore into t2 values (null,6,1),(10,8,1);
+select last_insert_id();
+last_insert_id()
+1
+insert ignore into t2 values (null,6,1),(null,8,1),(null,15,1),(null,20,1);
+select last_insert_id();
+last_insert_id()
+11
+select * from t2;
+k a c
+1 6 1
+2 7 NULL
+10 8 1
+11 15 1
+12 20 1
+drop table t2;
+create table t1 (a int primary key auto_increment, b int, c int, d timestamp default current_timestamp, unique(b),unique(c));
+insert into t1 values(null,1,1,now());
+insert into t1 values(null,0,0,null);
+replace into t1 values(null,1,0,null);
+select last_insert_id();
+last_insert_id()
+3
+drop table t1;
diff --git a/mysql-test/r/bigint.result b/mysql-test/r/bigint.result
index c76385fa334..c27ce18cfd3 100644
--- a/mysql-test/r/bigint.result
+++ b/mysql-test/r/bigint.result
@@ -174,7 +174,7 @@ create table t1 select 1 as 'a';
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` bigint(1) NOT NULL DEFAULT '0'
+ `a` int(1) NOT NULL DEFAULT '0'
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 select 9223372036854775809 as 'a';
diff --git a/mysql-test/r/binlog_row_binlog.result b/mysql-test/r/binlog_row_binlog.result
index 6cb086109b4..17c1d171b5c 100644
--- a/mysql-test/r/binlog_row_binlog.result
+++ b/mysql-test/r/binlog_row_binlog.result
@@ -235,3 +235,37 @@ master-bin.000001 # Rotate 1 # master-bin.000002;pos=4
show binlog events in 'master-bin.000002' from 102;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000002 # Query 1 # use `test`; drop table t1
+reset master;
+create table t1 (id tinyint auto_increment primary key);
+set insert_id=128;
+insert into t1 values(null);
+Warnings:
+Warning 1264 Out of range value for column 'id' at row 1
+select * from t1;
+id
+127
+drop table t1;
+create table t1 (a int not null auto_increment, primary key (a)) engine=myisam;
+set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1;
+insert delayed into t1 values (207);
+insert delayed into t1 values (null);
+insert delayed into t1 values (300);
+select * from t1;
+a
+207
+208
+300
+show binlog events from 102;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key)
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; drop table t1
+master-bin.000001 # Query 1 # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+drop table t1;
diff --git a/mysql-test/r/binlog_row_blackhole.result b/mysql-test/r/binlog_row_blackhole.result
index 60a8802ecd0..a02aea4ea49 100644
--- a/mysql-test/r/binlog_row_blackhole.result
+++ b/mysql-test/r/binlog_row_blackhole.result
@@ -111,17 +111,13 @@ master-bin.000001 # Query 1 # use `test`; drop table t1,t2
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=blackhole
master-bin.000001 # Table_map 1 # table_id: # (test.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Table_map 1 # table_id: # (test.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Table_map 1 # table_id: # (test.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; create table t2 (a varchar(200)) engine=blackhole
master-bin.000001 # Table_map 1 # table_id: # (test.t2)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; alter table t1 add b int
master-bin.000001 # Query 1 # use `test`; alter table t1 drop b
master-bin.000001 # Query 1 # use `test`; create table t3 like t1
@@ -145,8 +141,3 @@ master-bin.000001 # Table_map 1 # table_id: # (test.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
master-bin.000001 # Query 1 # use `test`; COMMIT
drop table if exists t1;
-create table t1 (c char(20)) engine=MyISAM;
-insert into t1 values ("Monty"),("WAX"),("Walrus");
-alter table t1 engine=blackhole;
-ERROR HY000: Table storage engine for 't1' doesn't have this option
-drop table t1;
diff --git a/mysql-test/r/binlog_row_mix_innodb_myisam.result b/mysql-test/r/binlog_row_mix_innodb_myisam.result
index 32c21a01f27..ae66f98739d 100644
--- a/mysql-test/r/binlog_row_mix_innodb_myisam.result
+++ b/mysql-test/r/binlog_row_mix_innodb_myisam.result
@@ -8,12 +8,12 @@ insert into t2 select * from t1;
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 243 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 316 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -25,12 +25,12 @@ Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 243 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 316 Query 1 # use `test`; ROLLBACK
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; ROLLBACK
delete from t1;
delete from t2;
reset master;
@@ -45,16 +45,16 @@ Warning 1196 Some non-transactional changed tables couldn't be rolled back
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 243 Query 1 # use `test`; savepoint my_savepoint
-master-bin.000001 328 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 367 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 401 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 440 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 479 Query 1 # use `test`; rollback to savepoint my_savepoint
-master-bin.000001 576 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -74,18 +74,18 @@ a
7
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 243 Query 1 # use `test`; savepoint my_savepoint
-master-bin.000001 328 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 367 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 401 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 440 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 479 Query 1 # use `test`; rollback to savepoint my_savepoint
-master-bin.000001 576 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 615 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 649 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -100,12 +100,12 @@ get_lock("a",10)
1
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 243 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 316 Query 1 # use `test`; ROLLBACK
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; ROLLBACK
delete from t1;
delete from t2;
reset master;
@@ -113,11 +113,11 @@ insert into t1 values(9);
insert into t2 select * from t1;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 141 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 175 Xid 1 # COMMIT /* xid= */
-master-bin.000001 202 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 241 Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
delete from t1;
delete from t2;
reset master;
@@ -126,24 +126,24 @@ begin;
insert into t2 select * from t1;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 141 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 175 Xid 1 # COMMIT /* xid= */
-master-bin.000001 202 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 241 Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
insert into t1 values(11);
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 141 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 175 Xid 1 # COMMIT /* xid= */
-master-bin.000001 202 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 241 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 275 Query 1 # use `test`; BEGIN
-master-bin.000001 343 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 382 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 416 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
alter table t2 engine=INNODB;
delete from t1;
delete from t2;
@@ -154,12 +154,12 @@ insert into t2 select * from t1;
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 243 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 316 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -181,10 +181,10 @@ rollback to savepoint my_savepoint;
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 243 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -202,12 +202,12 @@ a
18
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 243 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 316 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
alter table t2 engine=MyISAM;
@@ -254,28 +254,28 @@ get_lock("lock1",60)
1
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 209 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 243 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 282 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 316 Xid 1 # COMMIT /* xid= */
-master-bin.000001 343 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 382 Delete_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 421 Xid 1 # COMMIT /* xid= */
-master-bin.000001 448 Query 1 # use `test`; alter table t2 engine=MyISAM
-master-bin.000001 539 Table_map 1 # table_id: # (test.t1)
-master-bin.000001 578 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 612 Xid 1 # COMMIT /* xid= */
-master-bin.000001 639 Table_map 1 # table_id: # (test.t2)
-master-bin.000001 678 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 712 Query 1 # use `test`; drop table t1,t2
-master-bin.000001 791 Query 1 # use `test`; create table t0 (n int)
-master-bin.000001 877 Table_map 1 # table_id: # (test.t0)
-master-bin.000001 916 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 950 Table_map 1 # table_id: # (test.t0)
-master-bin.000001 989 Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 1023 Query 1 # use `test`; create table t2 (n int) engine=innodb
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Delete_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; alter table t2 engine=MyISAM
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; drop table t1,t2
+master-bin.000001 # Query 1 # use `test`; create table t0 (n int)
+master-bin.000001 # Table_map 1 # table_id: # (test.t0)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t0)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; create table t2 (n int) engine=innodb
do release_lock("lock1");
drop table t0,t2;
set autocommit=0;
@@ -357,83 +357,55 @@ a b
DROP TABLE t1,t2;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Table_map 1 142 table_id: # (test.t1)
-master-bin.000001 142 Write_rows 1 189 table_id: # flags: STMT_END_F
-master-bin.000001 189 Query 1 257 use `test`; BEGIN
-master-bin.000001 257 Query 1 182 use `test`; CREATE TABLE `t2` (
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; CREATE TABLE `t2` (
`a` int(11) NOT NULL DEFAULT '0',
`b` int(11) DEFAULT NULL,
PRIMARY KEY (`a`)
) ENGINE=InnoDB
-master-bin.000001 439 Table_map 1 222 table_id: # (test.t2)
-master-bin.000001 479 Write_rows 1 260 table_id: # flags: STMT_END_F
-master-bin.000001 517 Xid 1 544 COMMIT /* xid= */
-master-bin.000001 544 Query 1 630 use `test`; DROP TABLE if exists t2
-master-bin.000001 630 Table_map 1 670 table_id: # (test.t1)
-master-bin.000001 670 Write_rows 1 708 table_id: # flags: STMT_END_F
-master-bin.000001 708 Query 1 776 use `test`; BEGIN
-master-bin.000001 776 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; DROP TABLE if exists t2
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS t2
+master-bin.000001 # Query 1 # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; CREATE TABLE `t2` (
`a` int(11) NOT NULL DEFAULT '0',
`b` int(11) DEFAULT NULL,
PRIMARY KEY (`a`)
) ENGINE=InnoDB
-master-bin.000001 968 Query 1 1039 use `test`; ROLLBACK
-master-bin.000001 1039 Query 1 1125 use `test`; DROP TABLE IF EXISTS t2
-master-bin.000001 1125 Query 1 1249 use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
-master-bin.000001 1249 Table_map 1 1289 table_id: # (test.t1)
-master-bin.000001 1289 Write_rows 1 1327 table_id: # flags: STMT_END_F
-master-bin.000001 1327 Query 1 1395 use `test`; BEGIN
-master-bin.000001 1395 Query 1 182 use `test`; CREATE TABLE `t2` (
- `a` int(11) NOT NULL DEFAULT '0',
- `b` int(11) DEFAULT NULL,
- PRIMARY KEY (`a`)
-) ENGINE=InnoDB
-master-bin.000001 1577 Table_map 1 222 table_id: # (test.t2)
-master-bin.000001 1617 Write_rows 1 260 table_id: # flags: STMT_END_F
-master-bin.000001 1655 Xid 1 1682 COMMIT /* xid= */
-master-bin.000001 1682 Query 1 80 use `test`; TRUNCATE table t2
-master-bin.000001 1762 Xid 1 1789 COMMIT /* xid= */
-master-bin.000001 1789 Table_map 1 1829 table_id: # (test.t1)
-master-bin.000001 1829 Write_rows 1 1867 table_id: # flags: STMT_END_F
-master-bin.000001 1867 Query 1 1935 use `test`; BEGIN
-master-bin.000001 1935 Table_map 1 40 table_id: # (test.t2)
-master-bin.000001 1975 Write_rows 1 78 table_id: # flags: STMT_END_F
-master-bin.000001 2013 Xid 1 2040 COMMIT /* xid= */
-master-bin.000001 2040 Query 1 2116 use `test`; DROP TABLE t2
-master-bin.000001 2116 Table_map 1 2156 table_id: # (test.t1)
-master-bin.000001 2156 Write_rows 1 2194 table_id: # flags: STMT_END_F
-master-bin.000001 2194 Table_map 1 2234 table_id: # (test.t1)
-master-bin.000001 2234 Write_rows 1 2272 table_id: # flags: STMT_END_F
-master-bin.000001 2272 Table_map 1 2312 table_id: # (test.t1)
-master-bin.000001 2312 Write_rows 1 2350 table_id: # flags: STMT_END_F
-master-bin.000001 2350 Query 1 2418 use `test`; BEGIN
-master-bin.000001 2418 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
- `a` int(11) NOT NULL DEFAULT '0',
- `b` int(11) DEFAULT NULL,
- PRIMARY KEY (`a`)
-) ENGINE=InnoDB
-master-bin.000001 2610 Xid 1 2637 COMMIT /* xid= */
-master-bin.000001 2637 Table_map 1 2677 table_id: # (test.t1)
-master-bin.000001 2677 Write_rows 1 2715 table_id: # flags: STMT_END_F
-master-bin.000001 2715 Query 1 2783 use `test`; BEGIN
-master-bin.000001 2783 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
- `a` int(11) NOT NULL DEFAULT '0',
- `b` int(11) DEFAULT NULL,
- PRIMARY KEY (`a`)
-) ENGINE=InnoDB
-master-bin.000001 2975 Query 1 3046 use `test`; ROLLBACK
-master-bin.000001 3046 Query 1 80 use `test`; TRUNCATE table t2
-master-bin.000001 3126 Xid 1 3153 COMMIT /* xid= */
-master-bin.000001 3153 Table_map 1 3193 table_id: # (test.t1)
-master-bin.000001 3193 Write_rows 1 3231 table_id: # flags: STMT_END_F
-master-bin.000001 3231 Query 1 3299 use `test`; BEGIN
-master-bin.000001 3299 Query 1 192 use `test`; CREATE TEMPORARY TABLE `t2` (
- `a` int(11) NOT NULL DEFAULT '0',
- `b` int(11) DEFAULT NULL,
- PRIMARY KEY (`a`)
-) ENGINE=InnoDB
-master-bin.000001 3491 Xid 1 3518 COMMIT /* xid= */
-master-bin.000001 3518 Query 1 3622 use `test`; DROP TABLE `t1` /* generated by server */
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Table_map 1 # table_id: # (test.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; DROP TABLE t2
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Table_map 1 # table_id: # (test.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `test`; DROP TABLE `t1` /* generated by server */
reset master;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=myisam;
diff --git a/mysql-test/r/binlog_stm_binlog.result b/mysql-test/r/binlog_stm_binlog.result
index f9180b69ab6..4e23db4828f 100644
--- a/mysql-test/r/binlog_stm_binlog.result
+++ b/mysql-test/r/binlog_stm_binlog.result
@@ -145,3 +145,35 @@ master-bin.000001 # Rotate 1 # master-bin.000002;pos=4
show binlog events in 'master-bin.000002' from 102;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000002 # Query 1 # use `test`; drop table t1
+reset master;
+create table t1 (id tinyint auto_increment primary key);
+set insert_id=128;
+insert into t1 values(null);
+Warnings:
+Warning 1264 Out of range value for column 'id' at row 1
+select * from t1;
+id
+127
+drop table t1;
+create table t1 (a int not null auto_increment, primary key (a)) engine=myisam;
+set @@session.auto_increment_increment=1, @@session.auto_increment_offset=1;
+insert delayed into t1 values (207);
+insert delayed into t1 values (null);
+insert delayed into t1 values (300);
+select * from t1;
+a
+207
+208
+300
+show binlog events from 102;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query 1 # use `test`; create table t1 (id tinyint auto_increment primary key)
+master-bin.000001 # Intvar 1 # INSERT_ID=127
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(null)
+master-bin.000001 # Query 1 # use `test`; drop table t1
+master-bin.000001 # Query 1 # use `test`; create table t1 (a int not null auto_increment, primary key (a)) engine=myisam
+master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (207)
+master-bin.000001 # Intvar 1 # INSERT_ID=208
+master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (null)
+master-bin.000001 # Query 1 # use `test`; insert delayed into t1 values (300)
+drop table t1;
diff --git a/mysql-test/r/binlog_stm_blackhole.result b/mysql-test/r/binlog_stm_blackhole.result
index 78bd2e8b7b3..309f45aed49 100644
--- a/mysql-test/r/binlog_stm_blackhole.result
+++ b/mysql-test/r/binlog_stm_blackhole.result
@@ -110,27 +110,18 @@ master-bin.000001 # Format_desc 1 # Server ver: VERSION, Binlog ver: 4
master-bin.000001 # Query 1 # use `test`; drop table t1,t2
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=blackhole
master-bin.000001 # Query 1 # use `test`; delete from t1 where a=10
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; update t1 set a=11 where a=15
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; insert into t1 values(1)
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; insert ignore into t1 values(1)
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; replace into t1 values(100)
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; create table t2 (a varchar(200)) engine=blackhole
master-bin.000001 # Begin_load_query 1 # ;file_id=1;block_len=581
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Execute_load_query 1 # use `test`; load data infile '../std_data_ln/words.dat' into table t2 ;file_id=1
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; alter table t1 add b int
master-bin.000001 # Query 1 # use `test`; alter table t1 drop b
master-bin.000001 # Query 1 # use `test`; create table t3 like t1
master-bin.000001 # Query 1 # use `test`; insert into t1 select * from t3
-master-bin.000001 # Query 1 # use `test`; COMMIT
master-bin.000001 # Query 1 # use `test`; replace into t1 select * from t3
-master-bin.000001 # Query 1 # use `test`; COMMIT
drop table t1,t2,t3;
reset master;
create table t1 (a int) engine=blackhole;
@@ -150,8 +141,3 @@ master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Query 1 # use `test`; insert into t1 values(1)
master-bin.000001 # Query 1 # use `test`; COMMIT
drop table if exists t1;
-create table t1 (c char(20)) engine=MyISAM;
-insert into t1 values ("Monty"),("WAX"),("Walrus");
-alter table t1 engine=blackhole;
-ERROR HY000: Table storage engine for 't1' doesn't have this option
-drop table t1;
diff --git a/mysql-test/r/binlog_stm_mix_innodb_myisam.result b/mysql-test/r/binlog_stm_mix_innodb_myisam.result
index e836cae0b15..95b6eb953a2 100644
--- a/mysql-test/r/binlog_stm_mix_innodb_myisam.result
+++ b/mysql-test/r/binlog_stm_mix_innodb_myisam.result
@@ -8,10 +8,10 @@ insert into t2 select * from t1;
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Query 1 # use `test`; insert into t1 values(1)
-master-bin.000001 257 Query 1 # use `test`; insert into t2 select * from t1
-master-bin.000001 351 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(1)
+master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -23,10 +23,10 @@ Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Query 1 # use `test`; insert into t1 values(2)
-master-bin.000001 257 Query 1 # use `test`; insert into t2 select * from t1
-master-bin.000001 351 Query 1 # use `test`; ROLLBACK
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(2)
+master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1
+master-bin.000001 # Query 1 # use `test`; ROLLBACK
delete from t1;
delete from t2;
reset master;
@@ -41,13 +41,13 @@ Warning 1196 Some non-transactional changed tables couldn't be rolled back
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Query 1 # use `test`; insert into t1 values(3)
-master-bin.000001 257 Query 1 # use `test`; savepoint my_savepoint
-master-bin.000001 342 Query 1 # use `test`; insert into t1 values(4)
-master-bin.000001 429 Query 1 # use `test`; insert into t2 select * from t1
-master-bin.000001 523 Query 1 # use `test`; rollback to savepoint my_savepoint
-master-bin.000001 620 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(3)
+master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(4)
+master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1
+master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -67,14 +67,14 @@ a
7
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Query 1 # use `test`; insert into t1 values(5)
-master-bin.000001 257 Query 1 # use `test`; savepoint my_savepoint
-master-bin.000001 342 Query 1 # use `test`; insert into t1 values(6)
-master-bin.000001 429 Query 1 # use `test`; insert into t2 select * from t1
-master-bin.000001 523 Query 1 # use `test`; rollback to savepoint my_savepoint
-master-bin.000001 620 Query 1 # use `test`; insert into t1 values(7)
-master-bin.000001 707 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(5)
+master-bin.000001 # Query 1 # use `test`; savepoint my_savepoint
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(6)
+master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1
+master-bin.000001 # Query 1 # use `test`; rollback to savepoint my_savepoint
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(7)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -89,10 +89,10 @@ get_lock("a",10)
1
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Query 1 # use `test`; insert into t1 values(8)
-master-bin.000001 257 Query 1 # use `test`; insert into t2 select * from t1
-master-bin.000001 351 Query 1 # use `test`; ROLLBACK
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(8)
+master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1
+master-bin.000001 # Query 1 # use `test`; ROLLBACK
delete from t1;
delete from t2;
reset master;
@@ -100,9 +100,9 @@ insert into t1 values(9);
insert into t2 select * from t1;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; insert into t1 values(9)
-master-bin.000001 189 Xid 1 # COMMIT /* xid= */
-master-bin.000001 216 Query 1 # use `test`; insert into t2 select * from t1
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(9)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1
delete from t1;
delete from t2;
reset master;
@@ -111,19 +111,19 @@ begin;
insert into t2 select * from t1;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; insert into t1 values(10)
-master-bin.000001 190 Xid 1 # COMMIT /* xid= */
-master-bin.000001 217 Query 1 # use `test`; insert into t2 select * from t1
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(10)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1
insert into t1 values(11);
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; insert into t1 values(10)
-master-bin.000001 190 Xid 1 # COMMIT /* xid= */
-master-bin.000001 217 Query 1 # use `test`; insert into t2 select * from t1
-master-bin.000001 311 Query 1 # use `test`; BEGIN
-master-bin.000001 379 Query 1 # use `test`; insert into t1 values(11)
-master-bin.000001 467 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(10)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(11)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
alter table t2 engine=INNODB;
delete from t1;
delete from t2;
@@ -134,10 +134,10 @@ insert into t2 select * from t1;
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Query 1 # use `test`; insert into t1 values(12)
-master-bin.000001 258 Query 1 # use `test`; insert into t2 select * from t1
-master-bin.000001 352 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(12)
+master-bin.000001 # Query 1 # use `test`; insert into t2 select * from t1
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -159,9 +159,9 @@ rollback to savepoint my_savepoint;
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Query 1 # use `test`; insert into t1 values(14)
-master-bin.000001 258 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(14)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
reset master;
@@ -179,10 +179,10 @@ a
18
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Query 1 # use `test`; insert into t1 values(16)
-master-bin.000001 258 Query 1 # use `test`; insert into t1 values(18)
-master-bin.000001 346 Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(16)
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(18)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
delete from t1;
delete from t2;
alter table t2 engine=MyISAM;
@@ -229,29 +229,29 @@ get_lock("lock1",60)
1
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 # use `test`; BEGIN
-master-bin.000001 170 Query 1 # use `test`; insert into t1 values(16)
-master-bin.000001 258 Query 1 # use `test`; insert into t1 values(18)
-master-bin.000001 346 Xid 1 # COMMIT /* xid= */
-master-bin.000001 373 Query 1 # use `test`; delete from t1
-master-bin.000001 450 Xid 1 # COMMIT /* xid= */
-master-bin.000001 477 Query 1 # use `test`; delete from t2
-master-bin.000001 554 Xid 1 # COMMIT /* xid= */
-master-bin.000001 581 Query 1 # use `test`; alter table t2 engine=MyISAM
-master-bin.000001 672 Query 1 # use `test`; insert into t1 values (1)
-master-bin.000001 760 Xid 1 # COMMIT /* xid= */
-master-bin.000001 787 Query 1 # use `test`; insert into t2 values (20)
-master-bin.000001 876 Query 1 # use `test`; drop table t1,t2
-master-bin.000001 955 Query 1 # use `test`; create temporary table ti (a int) engine=innodb
-master-bin.000001 1065 Query 1 # use `test`; insert into ti values(1)
-master-bin.000001 1152 Xid 1 # COMMIT /* xid= */
-master-bin.000001 1179 Query 1 # use `test`; create temporary table t1 (a int) engine=myisam
-master-bin.000001 1289 Query 1 # use `test`; insert t1 values (1)
-master-bin.000001 1372 Query 1 # use `test`; create table t0 (n int)
-master-bin.000001 1458 Query 1 # use `test`; insert t0 select * from t1
-master-bin.000001 1547 Query 1 # use `test`; insert into t0 select GET_LOCK("lock1",null)
-master-bin.000001 1654 Query 1 # use `test`; create table t2 (n int) engine=innodb
-master-bin.000001 1754 Query 1 # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`t1`,`test`.`ti`
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(16)
+master-bin.000001 # Query 1 # use `test`; insert into t1 values(18)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; delete from t1
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; delete from t2
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; alter table t2 engine=MyISAM
+master-bin.000001 # Query 1 # use `test`; insert into t1 values (1)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; insert into t2 values (20)
+master-bin.000001 # Query 1 # use `test`; drop table t1,t2
+master-bin.000001 # Query 1 # use `test`; create temporary table ti (a int) engine=innodb
+master-bin.000001 # Query 1 # use `test`; insert into ti values(1)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; create temporary table t1 (a int) engine=myisam
+master-bin.000001 # Query 1 # use `test`; insert t1 values (1)
+master-bin.000001 # Query 1 # use `test`; create table t0 (n int)
+master-bin.000001 # Query 1 # use `test`; insert t0 select * from t1
+master-bin.000001 # Query 1 # use `test`; insert into t0 select GET_LOCK("lock1",null)
+master-bin.000001 # Query 1 # use `test`; create table t2 (n int) engine=innodb
+master-bin.000001 # Query 1 # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`t1`,`test`.`ti`
do release_lock("lock1");
drop table t0,t2;
set autocommit=0;
@@ -333,28 +333,28 @@ a b
DROP TABLE t1,t2;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 102 Query 1 198 use `test`; INSERT INTO t1 values (1,1),(1,2)
-master-bin.000001 198 Query 1 284 use `test`; DROP TABLE if exists t2
-master-bin.000001 284 Query 1 374 use `test`; INSERT INTO t1 values (3,3)
-master-bin.000001 374 Query 1 460 use `test`; DROP TABLE IF EXISTS t2
-master-bin.000001 460 Query 1 584 use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
-master-bin.000001 584 Query 1 674 use `test`; INSERT INTO t1 VALUES (4,4)
-master-bin.000001 674 Query 1 80 use `test`; TRUNCATE table t2
-master-bin.000001 754 Xid 1 781 COMMIT /* xid= */
-master-bin.000001 781 Query 1 871 use `test`; INSERT INTO t1 VALUES (5,5)
-master-bin.000001 871 Query 1 947 use `test`; DROP TABLE t2
-master-bin.000001 947 Query 1 1037 use `test`; INSERT INTO t1 values (6,6)
-master-bin.000001 1037 Query 1 1171 use `test`; CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb
-master-bin.000001 1171 Query 1 1261 use `test`; INSERT INTO t1 values (7,7)
-master-bin.000001 1261 Query 1 1351 use `test`; INSERT INTO t1 values (8,8)
-master-bin.000001 1351 Query 1 1441 use `test`; INSERT INTO t1 values (9,9)
-master-bin.000001 1441 Query 1 80 use `test`; TRUNCATE table t2
-master-bin.000001 1521 Xid 1 1548 COMMIT /* xid= */
-master-bin.000001 1548 Query 1 1640 use `test`; INSERT INTO t1 values (10,10)
-master-bin.000001 1640 Query 1 1708 use `test`; BEGIN
-master-bin.000001 1708 Query 1 94 use `test`; INSERT INTO t2 values (100,100)
-master-bin.000001 1802 Xid 1 1829 COMMIT /* xid= */
-master-bin.000001 1829 Query 1 1908 use `test`; DROP TABLE t1,t2
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (1,1),(1,2)
+master-bin.000001 # Query 1 # use `test`; DROP TABLE if exists t2
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (3,3)
+master-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS t2
+master-bin.000001 # Query 1 # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 VALUES (4,4)
+master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 VALUES (5,5)
+master-bin.000001 # Query 1 # use `test`; DROP TABLE t2
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (6,6)
+master-bin.000001 # Query 1 # use `test`; CREATE TEMPORARY TABLE t2 (a int, b int, primary key (a)) engine=innodb
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (7,7)
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (8,8)
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (9,9)
+master-bin.000001 # Query 1 # use `test`; TRUNCATE table t2
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 values (10,10)
+master-bin.000001 # Query 1 # use `test`; BEGIN
+master-bin.000001 # Query 1 # use `test`; INSERT INTO t2 values (100,100)
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
+master-bin.000001 # Query 1 # use `test`; DROP TABLE t1,t2
reset master;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=myisam;
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index c3710865b15..9ecaaa66cc3 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -465,7 +465,7 @@ t2 CREATE TABLE `t2` (
`ifnull(h,h)` decimal(5,4) DEFAULT NULL,
`ifnull(i,i)` year(4) DEFAULT NULL,
`ifnull(j,j)` date DEFAULT NULL,
- `ifnull(k,k)` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `ifnull(k,k)` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
`ifnull(l,l)` datetime DEFAULT NULL,
`ifnull(m,m)` varchar(1) DEFAULT NULL,
`ifnull(n,n)` varchar(3) DEFAULT NULL,
@@ -620,7 +620,7 @@ create database mysqltest;
use mysqltest;
drop database mysqltest;
create table test.t1 like x;
-ERROR 42000: Incorrect database name 'NULL'
+ERROR 3D000: No database selected
drop table if exists test.t1;
create database mysqltest;
use mysqltest;
@@ -681,7 +681,7 @@ Table Create Table
t1 CREATE TABLE `t1` (
`b` int(11) NOT NULL,
`a` varchar(12) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
- `c` bigint(1) NOT NULL DEFAULT '0',
+ `c` int(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
@@ -694,7 +694,7 @@ Table Create Table
t1 CREATE TABLE `t1` (
`b` int(11) DEFAULT NULL,
`a` varchar(12) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
- `c` bigint(1) NOT NULL DEFAULT '0',
+ `c` int(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
diff --git a/mysql-test/r/create_not_windows.result b/mysql-test/r/create_not_windows.result
index 2d7fd30dfdd..abe76fd3fbe 100644
--- a/mysql-test/r/create_not_windows.result
+++ b/mysql-test/r/create_not_windows.result
@@ -12,3 +12,20 @@ about:text CREATE TABLE `about:text` (
PRIMARY KEY (`_id`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table `about:text`;
+use test;
+drop table if exists t1;
+create table t1(a int) engine=myisam;
+insert into t1 values(1);
+"We get an error because the table is in the definition cache"
+create table t1(a int, b int);
+ERROR 42S01: Table 't1' already exists
+"Flush the cache and recreate the table anew to be able to drop it"
+flush tables;
+show open tables like "t%";
+Database Table In_use Name_locked
+create table t1(a int, b int, c int);
+"Try to select from the table. This should not crash the server"
+select count(a) from t1;
+count(a)
+0
+drop table t1;
diff --git a/mysql-test/r/ctype_ucs2_def.result b/mysql-test/r/ctype_ucs2_def.result
index 897dbac251c..2f9dc4ae616 100644
--- a/mysql-test/r/ctype_ucs2_def.result
+++ b/mysql-test/r/ctype_ucs2_def.result
@@ -1,3 +1,6 @@
+show variables like 'collation_server';
+Variable_name Value
+collation_server ucs2_unicode_ci
show variables like "%character_set_ser%";
Variable_name Value
character_set_server ucs2
diff --git a/mysql-test/r/date_formats.result b/mysql-test/r/date_formats.result
index 804bfabcf65..ee70327d17b 100644
--- a/mysql-test/r/date_formats.result
+++ b/mysql-test/r/date_formats.result
@@ -498,6 +498,22 @@ f1 f2
Warnings:
Warning 1292 Truncated incorrect date value: '2003-04-05 g'
Warning 1292 Truncated incorrect datetime value: '2003-04-05 10:11:12.101010234567'
+set names latin1;
+select date_format('2004-01-01','%W (%a), %e %M (%b) %Y');
+date_format('2004-01-01','%W (%a), %e %M (%b) %Y')
+Thursday (Thu), 1 January (Jan) 2004
+set lc_time_names=ru_RU;
+set names koi8r;
+select date_format('2004-01-01','%W (%a), %e %M (%b) %Y');
+date_format('2004-01-01','%W (%a), %e %M (%b) %Y')
+þÅÔ×ÅÒÇ (þÔ×), 1 ñÎ×ÁÒÑ (ñÎ×) 2004
+set lc_time_names=de_DE;
+set names latin1;
+select date_format('2004-01-01','%W (%a), %e %M (%b) %Y');
+date_format('2004-01-01','%W (%a), %e %M (%b) %Y')
+Donnerstag (Do), 1 Januar (Jan) 2004
+set names latin1;
+set lc_time_names=en_US;
create table t1 (f1 datetime);
insert into t1 (f1) values ("2005-01-01");
insert into t1 (f1) values ("2005-02-01");
@@ -551,3 +567,9 @@ TIME_FORMAT("24:00:00", '%l %p')
SELECT TIME_FORMAT("25:00:00", '%l %p');
TIME_FORMAT("25:00:00", '%l %p')
1 AM
+SELECT DATE_FORMAT('%Y-%m-%d %H:%i:%s', 1151414896);
+DATE_FORMAT('%Y-%m-%d %H:%i:%s', 1151414896)
+NULL
+Warnings:
+Warning 1292 Incorrect datetime value: '%Y-%m-%d %H:%i:%s'
+"End of 4.1 tests"
diff --git a/mysql-test/r/events_logs_tests.result b/mysql-test/r/events_logs_tests.result
index 9202d63fd2c..950090399d5 100644
--- a/mysql-test/r/events_logs_tests.result
+++ b/mysql-test/r/events_logs_tests.result
@@ -49,8 +49,8 @@ USER_HOST SLEEPVAL events_test SELECT SLEEP(2)
SET SESSION long_query_time=300;
"Make it quite long"
TRUNCATE mysql.slow_log;
-SET SESSION long_query_time=1;
CREATE TABLE slow_event_test (slo_val tinyint, val tinyint);
+SET SESSION long_query_time=1;
"This won't go to the slow log"
CREATE EVENT long_event ON SCHEDULE EVERY 1 MINUTE DO INSERT INTO slow_event_test SELECT @@long_query_time, SLEEP(3);
SELECT * FROM slow_event_test;
diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result
index 5f5cd94c216..4bb279add69 100644
--- a/mysql-test/r/federated.result
+++ b/mysql-test/r/federated.result
@@ -1558,6 +1558,8 @@ id
3
4
5
+DROP TABLE federated.t1;
+DROP TABLE federated.t1;
DROP TABLE IF EXISTS federated.bug_17377_table;
CREATE TABLE federated.bug_17377_table (
`fld_cid` bigint(20) NOT NULL auto_increment,
@@ -1601,7 +1603,137 @@ fld_cid fld_name fld_parentid fld_delt
5 Torkel 0 0
DROP TABLE federated.t1;
DROP TABLE federated.bug_17377_table;
-DROP TABLE federated.t1;
+drop table if exists federated.t1;
+create table federated.t1 (a int, b int, c int);
+drop table if exists federated.t1;
+drop table if exists federated.t2;
+create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
+create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b;
+create table federated.t2 (a int, b int);
+insert into federated.t2 values (13, 17), (19, 23);
+insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
+select * from federated.t1 order by a;
+a b c
+1 2 2
+3 5 15
+7 11 77
+delete from federated.t1;
+insert into federated.t1 (a, b) select * from federated.t2;
+select * from federated.t1 order by a;
+a b c
+13 17 221
+19 23 437
+delete from federated.t1;
+load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
+select * from federated.t1 order by a;
+a b c
+3 4 12
+5 6 30
+drop tables federated.t1, federated.t2;
+drop table federated.t1;
+create table federated.t1 (i1 int, i2 int, i3 int);
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20));
+create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+i1 i2 i3
+1 5 10
+2 2 2
+3 7 12
+4 5 2
+9 10 15
+select * from federated.t2;
+id c1 c2
+9 abc def
+5 opq lmn
+2 test t t test
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+i1 i2 i3
+1 5 10
+2 15 2
+3 7 12
+4 5 2
+9 15 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t ppc
+5 opq lmn
+9 abc ppc
+delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+i1 i2 i3
+2 15 2
+3 7 12
+9 15 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t ppc
+9 abc ppc
+drop table federated.t1, federated.t2;
+drop table federated.t1, federated.t2;
+create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1));
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id));
+create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
+create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+i1 i2 i3
+1 5 10
+2 2 2
+3 7 12
+4 5 2
+9 10 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t t test
+5 opq lmn
+9 abc def
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+i1 i2 i3
+1 5 10
+2 15 2
+3 7 12
+4 5 2
+9 15 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t ppc
+5 opq lmn
+9 abc ppc
+delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+i1 i2 i3
+2 15 2
+3 7 12
+9 15 15
+select * from federated.t2 order by id;
+id c1 c2
+2 test t ppc
+9 abc ppc
+drop table federated.t1, federated.t2;
+drop table federated.t1, federated.t2;
+create table t1 (id int not null auto_increment primary key, val int);
+create table t1
+(id int not null auto_increment primary key, val int) engine=federated
+connection='mysql://root@127.0.0.1:SLAVE_PORT/test/t1';
+insert into t1 values (1,0),(2,0);
+update t1 set val = NULL where id = 1;
+select * from t1;
+id val
+1 NULL
+2 0
+select * from t1;
+id val
+1 NULL
+2 0
+drop table t1;
+drop table t1;
+End of 5.0 tests
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
diff --git a/mysql-test/r/func_compress.result b/mysql-test/r/func_compress.result
index d04495ad657..a4b780d146a 100644
--- a/mysql-test/r/func_compress.result
+++ b/mysql-test/r/func_compress.result
@@ -11,7 +11,7 @@ explain extended select uncompress(compress(@test_compress_string));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache uncompress(compress((@test_compress_string))) AS `uncompress(compress(@test_compress_string))`
+Note 1003 select uncompress(compress((@test_compress_string))) AS `uncompress(compress(@test_compress_string))`
select uncompressed_length(compress(@test_compress_string))=length(@test_compress_string);
uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)
1
@@ -19,7 +19,7 @@ explain extended select uncompressed_length(compress(@test_compress_string))=len
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache (uncompressed_length(compress((@test_compress_string))) = length((@test_compress_string))) AS `uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)`
+Note 1003 select (uncompressed_length(compress((@test_compress_string))) = length((@test_compress_string))) AS `uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)`
select uncompressed_length(compress(@test_compress_string));
uncompressed_length(compress(@test_compress_string))
117
diff --git a/mysql-test/r/func_gconcat.result b/mysql-test/r/func_gconcat.result
index d33ecfae494..675e0faa1a0 100644
--- a/mysql-test/r/func_gconcat.result
+++ b/mysql-test/r/func_gconcat.result
@@ -647,3 +647,16 @@ select charset(group_concat(c1 order by c2)) from t1;
charset(group_concat(c1 order by c2))
latin1
drop table t1;
+CREATE TABLE t1 (a INT(10), b LONGTEXT, PRIMARY KEY (a));
+SET GROUP_CONCAT_MAX_LEN = 20000000;
+INSERT INTO t1 VALUES (1,REPEAT(CONCAT('A',CAST(CHAR(0) AS BINARY),'B'), 40000));
+INSERT INTO t1 SELECT a + 1, b FROM t1;
+SELECT a, CHAR_LENGTH(b) FROM t1;
+a CHAR_LENGTH(b)
+1 120000
+2 120000
+SELECT CHAR_LENGTH( GROUP_CONCAT(b) ) FROM t1;
+CHAR_LENGTH( GROUP_CONCAT(b) )
+240001
+SET GROUP_CONCAT_MAX_LEN = 1024;
+DROP TABLE t1;
diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result
index cee99fe7d0d..74da6478470 100644
--- a/mysql-test/r/func_group.result
+++ b/mysql-test/r/func_group.result
@@ -653,15 +653,6 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range k2 k2 4 NULL 6 Using where; Using index
1 SIMPLE t1 index NULL PRIMARY 3 NULL 15 Using index
drop table t1, t2;
-create table t1 (USR_ID integer not null, MAX_REQ integer not null, constraint PK_SEA_USER primary key (USR_ID)) engine=InnoDB;
-insert into t1 values (1, 3);
-select count(*) + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ from t1 group by MAX_REQ;
-count(*) + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ
-1
-select Case When Count(*) < MAX_REQ Then 1 Else 0 End from t1 where t1.USR_ID = 1 group by MAX_REQ;
-Case When Count(*) < MAX_REQ Then 1 Else 0 End
-1
-drop table t1;
create table t1 (a char(10));
insert into t1 values ('a'),('b'),('c');
select coercibility(max(a)) from t1;
@@ -988,3 +979,14 @@ SUM(a)
6
DROP TABLE t1;
set div_precision_increment= @sav_dpi;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT);
+INSERT INTO t1 VALUES (1,1), (2,2);
+CREATE TABLE t2 (a INT PRIMARY KEY, b INT);
+INSERT INTO t2 VALUES (1,1), (3,3);
+SELECT SQL_NO_CACHE
+(SELECT SUM(c.a) FROM t1 ttt, t2 ccc
+WHERE ttt.a = ccc.b AND ttt.a = t.a GROUP BY ttt.a) AS minid
+FROM t1 t, t2 c WHERE t.a = c.b;
+minid
+NULL
+DROP TABLE t1,t2;
diff --git a/mysql-test/r/func_group_innodb.result b/mysql-test/r/func_group_innodb.result
new file mode 100644
index 00000000000..5f12a437eed
--- /dev/null
+++ b/mysql-test/r/func_group_innodb.result
@@ -0,0 +1,147 @@
+create table t1 (USR_ID integer not null, MAX_REQ integer not null, constraint PK_SEA_USER primary key (USR_ID)) engine=InnoDB;
+insert into t1 values (1, 3);
+select count(*) + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ from t1 group by MAX_REQ;
+count(*) + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ
+1
+select Case When Count(*) < MAX_REQ Then 1 Else 0 End from t1 where t1.USR_ID = 1 group by MAX_REQ;
+Case When Count(*) < MAX_REQ Then 1 Else 0 End
+1
+drop table t1;
+create table t1m (a int) engine=myisam;
+create table t1i (a int) engine=innodb;
+create table t2m (a int) engine=myisam;
+create table t2i (a int) engine=innodb;
+insert into t2m values (5);
+insert into t2i values (5);
+select min(a) from t1m;
+min(a)
+NULL
+select min(7) from t1m;
+min(7)
+NULL
+select min(7) from DUAL;
+min(7)
+NULL
+explain select min(7) from t2m join t1m;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+select min(7) from t2m join t1m;
+min(7)
+NULL
+select max(a) from t1m;
+max(a)
+NULL
+select max(7) from t1m;
+max(7)
+NULL
+select max(7) from DUAL;
+max(7)
+NULL
+explain select max(7) from t2m join t1m;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+select max(7) from t2m join t1m;
+max(7)
+NULL
+select 1, min(a) from t1m where a=99;
+1 min(a)
+1 NULL
+select 1, min(a) from t1m where 1=99;
+1 min(a)
+1 NULL
+select 1, min(1) from t1m where a=99;
+1 min(1)
+1 NULL
+select 1, min(1) from t1m where 1=99;
+1 min(1)
+1 NULL
+select 1, max(a) from t1m where a=99;
+1 max(a)
+1 NULL
+select 1, max(a) from t1m where 1=99;
+1 max(a)
+1 NULL
+select 1, max(1) from t1m where a=99;
+1 max(1)
+1 NULL
+select 1, max(1) from t1m where 1=99;
+1 max(1)
+1 NULL
+select min(a) from t1i;
+min(a)
+NULL
+select min(7) from t1i;
+min(7)
+NULL
+select min(7) from DUAL;
+min(7)
+NULL
+explain select min(7) from t2i join t1i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2i ALL NULL NULL NULL NULL 1
+1 SIMPLE t1i ALL NULL NULL NULL NULL 1
+select min(7) from t2i join t1i;
+min(7)
+NULL
+select max(a) from t1i;
+max(a)
+NULL
+select max(7) from t1i;
+max(7)
+NULL
+select max(7) from DUAL;
+max(7)
+NULL
+explain select max(7) from t2i join t1i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2i ALL NULL NULL NULL NULL 1
+1 SIMPLE t1i ALL NULL NULL NULL NULL 1
+select max(7) from t2i join t1i;
+max(7)
+NULL
+select 1, min(a) from t1i where a=99;
+1 min(a)
+1 NULL
+select 1, min(a) from t1i where 1=99;
+1 min(a)
+1 NULL
+select 1, min(1) from t1i where a=99;
+1 min(1)
+1 NULL
+select 1, min(1) from t1i where 1=99;
+1 min(1)
+1 NULL
+select 1, max(a) from t1i where a=99;
+1 max(a)
+1 NULL
+select 1, max(a) from t1i where 1=99;
+1 max(a)
+1 NULL
+select 1, max(1) from t1i where a=99;
+1 max(1)
+1 NULL
+select 1, max(1) from t1i where 1=99;
+1 max(1)
+1 NULL
+explain select count(*), min(7), max(7) from t1m, t1i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1m system NULL NULL NULL NULL 0 const row not found
+1 SIMPLE t1i ALL NULL NULL NULL NULL 1
+select count(*), min(7), max(7) from t1m, t1i;
+count(*) min(7) max(7)
+0 NULL NULL
+explain select count(*), min(7), max(7) from t1m, t2i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1m system NULL NULL NULL NULL 0 const row not found
+1 SIMPLE t2i ALL NULL NULL NULL NULL 1
+select count(*), min(7), max(7) from t1m, t2i;
+count(*) min(7) max(7)
+0 NULL NULL
+explain select count(*), min(7), max(7) from t2m, t1i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2m system NULL NULL NULL NULL 1
+1 SIMPLE t1i ALL NULL NULL NULL NULL 1
+select count(*), min(7), max(7) from t2m, t1i;
+count(*) min(7) max(7)
+0 NULL NULL
+drop table t1m, t1i, t2m, t2i;
diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result
index 94979258bca..4bd18ae589e 100644
--- a/mysql-test/r/func_math.result
+++ b/mysql-test/r/func_math.result
@@ -90,7 +90,7 @@ explain extended select rand(999999),rand();
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache rand(999999) AS `rand(999999)`,rand() AS `rand()`
+Note 1003 select rand(999999) AS `rand(999999)`,rand() AS `rand()`
select pi(),format(sin(pi()/2),6),format(cos(pi()/2),6),format(abs(tan(pi())),6),format(cot(1),6),format(asin(1),6),format(acos(0),6),format(atan(1),6);
pi() format(sin(pi()/2),6) format(cos(pi()/2),6) format(abs(tan(pi())),6) format(cot(1),6) format(asin(1),6) format(acos(0),6) format(atan(1),6)
3.141593 1.000000 0.000000 0.000000 0.642093 1.570796 1.570796 0.785398
diff --git a/mysql-test/r/func_sapdb.result b/mysql-test/r/func_sapdb.result
index 0be9ea9cf86..7e9bba9710c 100644
--- a/mysql-test/r/func_sapdb.result
+++ b/mysql-test/r/func_sapdb.result
@@ -81,6 +81,12 @@ makedate(1997,1)
select makedate(1997,0);
makedate(1997,0)
NULL
+select makedate(9999,365);
+makedate(9999,365)
+9999-12-31
+select makedate(9999,366);
+makedate(9999,366)
+NULL
select addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002");
addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002")
1998-01-02 01:01:01.000001
diff --git a/mysql-test/r/func_str.result b/mysql-test/r/func_str.result
index 731fe465932..5c95cd3dac3 100644
--- a/mysql-test/r/func_str.result
+++ b/mysql-test/r/func_str.result
@@ -33,6 +33,9 @@ instr('hello','HE') instr('hello',binary 'HE') instr(binary 'hello','HE')
select position(binary 'll' in 'hello'),position('a' in binary 'hello');
position(binary 'll' in 'hello') position('a' in binary 'hello')
3 0
+select left('hello',null), right('hello',null);
+left('hello',null) right('hello',null)
+NULL NULL
select left('hello',2),right('hello',2),substring('hello',2,2),mid('hello',1,5) ;
left('hello',2) right('hello',2) substring('hello',2,2) mid('hello',1,5)
he lo el hello
@@ -1038,6 +1041,49 @@ select * from t1 where f1='test' and (f2= sha("TEST") or f2= sha("test"));
f1 f2
test a94a8fe5ccb19ba61c4c0873d391e987982fbbd3
drop table t1;
+CREATE TABLE t1 (a varchar(10));
+INSERT INTO t1 VALUES ('abc'), ('xyz');
+SELECT a, CONCAT(a,' ',a) AS c FROM t1
+HAVING LEFT(c,LENGTH(c)-INSTR(REVERSE(c)," ")) = a;
+a c
+abc abc abc
+xyz xyz xyz
+SELECT a, CONCAT(a,' ',a) AS c FROM t1
+HAVING LEFT(CONCAT(a,' ',a),
+LENGTH(CONCAT(a,' ',a))-
+INSTR(REVERSE(CONCAT(a,' ',a))," ")) = a;
+a c
+abc abc abc
+xyz xyz xyz
+DROP TABLE t1;
+CREATE TABLE t1 (s varchar(10));
+INSERT INTO t1 VALUES ('yadda'), ('yaddy');
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM(s) > 'ab';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`s` AS `s` from `test`.`t1` where (trim(`test`.`t1`.`s`) > _latin1'ab')
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM('y' FROM s) > 'ab';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`s` AS `s` from `test`.`t1` where (trim(both _latin1'y' from `test`.`t1`.`s`) > _latin1'ab')
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM(LEADING 'y' FROM s) > 'ab';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`s` AS `s` from `test`.`t1` where (trim(leading _latin1'y' from `test`.`t1`.`s`) > _latin1'ab')
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM(TRAILING 'y' FROM s) > 'ab';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`s` AS `s` from `test`.`t1` where (trim(trailing _latin1'y' from `test`.`t1`.`s`) > _latin1'ab')
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM(BOTH 'y' FROM s) > 'ab';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`s` AS `s` from `test`.`t1` where (trim(both _latin1'y' from `test`.`t1`.`s`) > _latin1'ab')
+DROP TABLE t1;
End of 4.1 tests
create table t1 (d decimal default null);
insert into t1 values (null);
diff --git a/mysql-test/r/func_system.result b/mysql-test/r/func_system.result
index 40261a7e9c6..fa05021eb47 100644
--- a/mysql-test/r/func_system.result
+++ b/mysql-test/r/func_system.result
@@ -41,7 +41,7 @@ explain extended select database(), user();
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache database() AS `database()`,user() AS `user()`
+Note 1003 select database() AS `database()`,user() AS `user()`
create table t1 (version char(60)) select database(), user(), version() as 'version';
show create table t1;
Table Create Table
diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result
index 6fe881111fb..b6a3a6462ab 100644
--- a/mysql-test/r/func_time.result
+++ b/mysql-test/r/func_time.result
@@ -1,4 +1,5 @@
drop table if exists t1,t2,t3;
+set time_zone="+03:00";
select from_days(to_days("960101")),to_days(960201)-to_days("19960101"),to_days(date_add(curdate(), interval 1 day))-to_days(curdate()),weekday("1997-11-29");
from_days(to_days("960101")) to_days(960201)-to_days("19960101") to_days(date_add(curdate(), interval 1 day))-to_days(curdate()) weekday("1997-11-29")
1996-01-01 31 1 5
@@ -396,6 +397,12 @@ quarter
SELECT EXTRACT(QUARTER FROM '2004-12-15') AS quarter;
quarter
4
+SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
+DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE)
+9999-12-31 00:00:00
+SELECT DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
+DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE)
+9999-12-31 00:00:00
SELECT "1900-01-01 00:00:00" + INTERVAL 2147483648 SECOND;
"1900-01-01 00:00:00" + INTERVAL 2147483648 SECOND
1968-01-20 03:14:08
@@ -722,7 +729,7 @@ explain extended select period_add("9602",-12),period_diff(199505,"9404"),from_d
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache period_add(_latin1'9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,_latin1'9404') AS `period_diff(199505,"9404")`,from_days(to_days(_latin1'960101')) AS `from_days(to_days("960101"))`,dayofmonth(_latin1'1997-01-02') AS `dayofmonth("1997-01-02")`,month(_latin1'1997-01-02') AS `month("1997-01-02")`,monthname(_latin1'1972-03-04') AS `monthname("1972-03-04")`,dayofyear(_latin1'0000-00-00') AS `dayofyear("0000-00-00")`,hour(_latin1'1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute(_latin1'23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week(_latin1'1998-03-03',0) AS `WEEK("1998-03-03")`,yearweek(_latin1'2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year(_latin1'98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname(_latin1'1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec(_latin1'0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format(_latin1'1997-01-02 03:04:05',_latin1'%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s %w")`,from_unixtime(unix_timestamp(_latin1'1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,(_latin1'1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,(_latin1'1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from _latin1'1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)`
+Note 1003 select period_add(_latin1'9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,_latin1'9404') AS `period_diff(199505,"9404")`,from_days(to_days(_latin1'960101')) AS `from_days(to_days("960101"))`,dayofmonth(_latin1'1997-01-02') AS `dayofmonth("1997-01-02")`,month(_latin1'1997-01-02') AS `month("1997-01-02")`,monthname(_latin1'1972-03-04') AS `monthname("1972-03-04")`,dayofyear(_latin1'0000-00-00') AS `dayofyear("0000-00-00")`,hour(_latin1'1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute(_latin1'23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week(_latin1'1998-03-03',0) AS `WEEK("1998-03-03")`,yearweek(_latin1'2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year(_latin1'98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname(_latin1'1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec(_latin1'0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format(_latin1'1997-01-02 03:04:05',_latin1'%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s %w")`,from_unixtime(unix_timestamp(_latin1'1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,(_latin1'1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,(_latin1'1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from _latin1'1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)`
SET @TMP=NOW();
CREATE TABLE t1 (d DATETIME);
INSERT INTO t1 VALUES (NOW());
@@ -932,6 +939,7 @@ id day id day
1 2005-06-01 3 2005-07-15
3 2005-07-01 3 2005-07-15
DROP TABLE t1,t2;
+set time_zone= @@global.time_zone;
End of 5.0 tests
select date_sub("0050-01-01 00:00:01",INTERVAL 2 SECOND);
date_sub("0050-01-01 00:00:01",INTERVAL 2 SECOND)
diff --git a/mysql-test/r/func_timestamp.result b/mysql-test/r/func_timestamp.result
index d9912f08b72..495fedea9e6 100644
--- a/mysql-test/r/func_timestamp.result
+++ b/mysql-test/r/func_timestamp.result
@@ -1,4 +1,5 @@
drop table if exists t1;
+set time_zone="+03:00";
create table t1 (Zeit time, Tag tinyint not null, Monat tinyint not null,
Jahr smallint not null, index(Tag), index(Monat), index(Jahr) );
insert into t1 values ("09:26:00",16,9,1998),("09:26:00",16,9,1998);
@@ -9,3 +10,4 @@ Date Unix
1998-9-16 09:26:00 905927160
1998-9-16 09:26:00 905927160
drop table t1;
+set time_zone= @@global.time_zone;
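The func_time and func_timestamp results above are now bracketed by an explicit session time zone so the recorded UNIX_TIMESTAMP-style values stay deterministic regardless of the host's zone. The pattern, sketched here only to show the intent (the "..." stands for the time-dependent statements in each test):

set time_zone="+03:00";             -- pin the offset the recorded results assume
-- ... time-dependent statements ...
set time_zone= @@global.time_zone;  -- restore the server default for later tests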
diff --git a/mysql-test/r/gis-rtree.result b/mysql-test/r/gis-rtree.result
index cd6a2510001..e2e7a612dec 100644
--- a/mysql-test/r/gis-rtree.result
+++ b/mysql-test/r/gis-rtree.result
@@ -816,3 +816,43 @@ check table t1 extended;
Table Op Msg_type Msg_text
test.t1 check status OK
drop table t1;
+CREATE TABLE t1 (
+c1 geometry NOT NULL default '',
+SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+ -18.6055555000 -66.8158332999,
+ -18.7186111000 -66.8102777000,
+ -18.7211111000 -66.9269443999,
+ -18.6086111000 -66.9327777000))'));
+CHECK TABLE t1 EXTENDED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
+CREATE TABLE t1 (
+c1 geometry NOT NULL default '',
+SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+ -18.6055555000 -66.8158332999,
+ -18.7186111000 -66.8102777000,
+ -18.7211111000 -66.9269443999,
+ -18.6086111000 -66.9327777000))'));
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-65.7402776999 -96.6686111000,
+ -65.7372222000 -96.5516666000,
+ -65.8502777000 -96.5461111000,
+ -65.8527777000 -96.6627777000,
+ -65.7402776999 -96.6686111000))'));
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+ -18.6055555000 -66.8158332999,
+ -18.7186111000 -66.8102777000,
+ -18.7211111000 -66.9269443999,
+ -18.6086111000 -66.9327777000))'));
+CHECK TABLE t1 EXTENDED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result
index 84b4e6c2569..9017e96c8f4 100644
--- a/mysql-test/r/gis.result
+++ b/mysql-test/r/gis.result
@@ -694,3 +694,13 @@ alter table t1 add primary key pti(pt);
ERROR 42000: BLOB/TEXT column 'pt' used in key specification without a key length
alter table t1 add primary key pti(pt(20));
drop table t1;
+create table t1 (g GEOMETRY);
+select * from t1;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def test t1 t1 g g 255 4294967295 0 Y 144 0 63
+g
+select asbinary(g) from t1;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def asbinary(g) 252 8192 0 Y 128 0 63
+asbinary(g)
+drop table t1;
diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result
index e662c4d7785..00f675db075 100644
--- a/mysql-test/r/group_by.result
+++ b/mysql-test/r/group_by.result
@@ -796,3 +796,28 @@ aaa
show warnings;
Level Code Message
drop table t1, t2;
+CREATE TABLE t1 (a tinyint(3), b varchar(255), PRIMARY KEY (a));
+INSERT INTO t1 VALUES (1,'-----'), (6,'Allemagne'), (17,'Autriche'),
+(25,'Belgique'), (54,'Danemark'), (62,'Espagne'), (68,'France');
+CREATE TABLE t2 (a tinyint(3), b tinyint(3), PRIMARY KEY (a), KEY b (b));
+INSERT INTO t2 VALUES (1,1), (2,1), (6,6), (18,17), (15,25), (16,25),
+(17,25), (10,54), (5,62),(3,68);
+CREATE VIEW v1 AS select t1.a, concat(t1.b,'') AS b, t1.b as real_b from t1;
+explain
+SELECT straight_join sql_no_cache v1.a, v1.b, v1.real_b from t2, v1
+where t2.b=v1.a GROUP BY t2.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 index b b 2 NULL 10 Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 1 test.t2.b 1
+SELECT straight_join sql_no_cache v1.a, v1.b, v1.real_b from t2, v1
+where t2.b=v1.a GROUP BY t2.b;
+a b real_b
+1 ----- -----
+6 Allemagne Allemagne
+17 Autriche Autriche
+25 Belgique Belgique
+54 Danemark Danemark
+62 Espagne Espagne
+68 France France
+DROP VIEW v1;
+DROP TABLE t1,t2;
diff --git a/mysql-test/r/group_min_max.result b/mysql-test/r/group_min_max.result
index 87dc6618011..2d72540c8a9 100644
--- a/mysql-test/r/group_min_max.result
+++ b/mysql-test/r/group_min_max.result
@@ -2119,3 +2119,46 @@ SOUTH EAST SOUTH EAST
SOUTH WEST SOUTH WEST
WESTERN WESTERN
DROP TABLE t1;
+CREATE TABLE t1 (id1 INT, id2 INT);
+CREATE TABLE t2 (id2 INT, id3 INT, id5 INT);
+CREATE TABLE t3 (id3 INT, id4 INT);
+CREATE TABLE t4 (id4 INT);
+CREATE TABLE t5 (id5 INT, id6 INT);
+CREATE TABLE t6 (id6 INT);
+INSERT INTO t1 VALUES(1,1);
+INSERT INTO t2 VALUES(1,1,1);
+INSERT INTO t3 VALUES(1,1);
+INSERT INTO t4 VALUES(1);
+INSERT INTO t5 VALUES(1,1);
+INSERT INTO t6 VALUES(1);
+SELECT * FROM
+t1
+NATURAL JOIN
+(t2 JOIN (t3 NATURAL JOIN t4, t5 NATURAL JOIN t6)
+ON (t3.id3 = t2.id3 AND t5.id5 = t2.id5));
+id2 id1 id3 id5 id4 id3 id6 id5
+1 1 1 1 1 1 1 1
+SELECT * FROM
+t1
+NATURAL JOIN
+(((t3 NATURAL JOIN t4) join (t5 NATURAL JOIN t6) on t3.id4 = t5.id5) JOIN t2
+ON (t3.id3 = t2.id3 AND t5.id5 = t2.id5));
+id2 id1 id4 id3 id6 id5 id3 id5
+1 1 1 1 1 1 1 1
+SELECT * FROM t1 NATURAL JOIN ((t3 join (t5 NATURAL JOIN t6)) JOIN t2);
+id2 id1 id3 id4 id6 id5 id3 id5
+1 1 1 1 1 1 1 1
+SELECT * FROM
+(t2 JOIN (t3 NATURAL JOIN t4, t5 NATURAL JOIN t6)
+ON (t3.id3 = t2.id3 AND t5.id5 = t2.id5))
+NATURAL JOIN
+t1;
+id2 id3 id5 id4 id3 id6 id5 id1
+1 1 1 1 1 1 1 1
+SELECT * FROM
+(t2 JOIN ((t3 NATURAL JOIN t4) join (t5 NATURAL JOIN t6)))
+NATURAL JOIN
+t1;
+id2 id3 id5 id4 id3 id6 id5 id1
+1 1 1 1 1 1 1 1
+DROP TABLE t1,t2,t3,t4,t5,t6;
diff --git a/mysql-test/r/group_min_max_innodb.result b/mysql-test/r/group_min_max_innodb.result
new file mode 100644
index 00000000000..ae4b9d4d5dd
--- /dev/null
+++ b/mysql-test/r/group_min_max_innodb.result
@@ -0,0 +1,72 @@
+create table t4 (
+pk_col int auto_increment primary key, a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(64) default ' '
+) engine=innodb;
+insert into t4 (a1, a2, b, c, d) values
+('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'),
+('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'),
+('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'),
+('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'),
+('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'),
+('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'),
+('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'),
+('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'),
+('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'),
+('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'),
+('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'),
+('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'),
+('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'),
+('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'),
+('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'),
+('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4'),
+('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'),
+('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'),
+('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'),
+('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'),
+('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'),
+('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'),
+('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'),
+('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'),
+('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'),
+('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'),
+('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'),
+('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'),
+('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'),
+('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'),
+('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'),
+('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4');
+create index idx12672_0 on t4 (a1);
+create index idx12672_1 on t4 (a1,a2,b,c);
+create index idx12672_2 on t4 (a1,a2,b);
+analyze table t4;
+Table Op Msg_type Msg_text
+test.t4 analyze status OK
+select distinct a1 from t4 where pk_col not in (1,2,3,4);
+a1
+a
+b
+c
+d
+drop table t4;
+create table t1 (
+a varchar(30), b varchar(30), primary key(a), key(b)
+) engine=innodb;
+select distinct a from t1;
+a
+drop table t1;
+create table t1(a int, key(a)) engine=innodb;
+insert into t1 values(1);
+select a, count(a) from t1 group by a with rollup;
+a count(a)
+1 1
+NULL 1
+drop table t1;
+create table t1 (f1 int, f2 char(1), primary key(f1,f2)) engine=innodb;
+insert into t1 values ( 1,"e"),(2,"a"),( 3,"c"),(4,"d");
+alter table t1 drop primary key, add primary key (f2, f1);
+explain select distinct f1 a, f1 b from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 5 NULL 4 Using index; Using temporary
+explain select distinct f1, f2 from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL PRIMARY 5 NULL 3 Using index for group-by; Using temporary
+drop table t1;
diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result
index 308a92cb800..9e6a3f298c1 100644
--- a/mysql-test/r/information_schema.result
+++ b/mysql-test/r/information_schema.result
@@ -310,26 +310,26 @@ show create function sub1;
ERROR 42000: FUNCTION sub1 does not exist
select ROUTINE_NAME, ROUTINE_DEFINITION from information_schema.ROUTINES;
ROUTINE_NAME ROUTINE_DEFINITION
-sel2
-sub1
+sel2 NULL
+sub1 NULL
grant all privileges on test.* to mysqltest_1@localhost;
select ROUTINE_NAME, ROUTINE_DEFINITION from information_schema.ROUTINES;
ROUTINE_NAME ROUTINE_DEFINITION
-sel2
-sub1
+sel2 NULL
+sub1 NULL
create function sub2(i int) returns int
return i+1;
select ROUTINE_NAME, ROUTINE_DEFINITION from information_schema.ROUTINES;
ROUTINE_NAME ROUTINE_DEFINITION
-sel2
-sub1
+sel2 NULL
+sub1 NULL
sub2 return i+1
show create procedure sel2;
Procedure sql_mode Create Procedure
-sel2
+sel2 NULL
show create function sub1;
Function sql_mode Create Function
-sub1
+sub1 NULL
show create function sub2;
Function sql_mode Create Function
sub2 CREATE DEFINER=`mysqltest_1`@`localhost` FUNCTION `sub2`(i int) RETURNS int(11)
@@ -386,11 +386,11 @@ show keys from v4;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
select * from information_schema.views where TABLE_NAME like "v%";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE
-NULL test v0 select sql_no_cache `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER
-NULL test v1 select sql_no_cache `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER
-NULL test v2 select sql_no_cache `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER
-NULL test v3 select sql_no_cache `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
-NULL test v4 select sql_no_cache `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
+NULL test v0 /* ALGORITHM=UNDEFINED */ select `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER
+NULL test v1 /* ALGORITHM=UNDEFINED */ select `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER
+NULL test v2 /* ALGORITHM=UNDEFINED */ select `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER
+NULL test v3 /* ALGORITHM=UNDEFINED */ select `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
+NULL test v4 /* ALGORITHM=UNDEFINED */ select `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
drop view v0, v1, v2, v3, v4;
create table t1 (a int);
grant select,update,insert on t1 to mysqltest_1@localhost;
@@ -483,9 +483,9 @@ create view v2 (c) as select a from t1 WITH LOCAL CHECK OPTION;
create view v3 (c) as select a from t1 WITH CASCADED CHECK OPTION;
select * from information_schema.views;
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE
-NULL test v1 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
-NULL test v2 select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER
-NULL test v3 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
+NULL test v1 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
+NULL test v2 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER
+NULL test v3 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
grant select (a) on test.t1 to joe@localhost with grant option;
select * from INFORMATION_SCHEMA.COLUMN_PRIVILEGES;
GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE
@@ -705,7 +705,7 @@ Warnings:
Warning 1356 View 'test.v2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
show create table v3;
View Create View
-v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select sql_no_cache `test`.`sub1`(1) AS `c`
+v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select `test`.`sub1`(1) AS `c`
Warnings:
Warning 1356 View 'test.v3' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
drop view v2;
@@ -1169,7 +1169,7 @@ select * from information_schema.views
where table_name='v1' or table_name='v2';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE
NULL test v1 NONE YES root@localhost DEFINER
-NULL test v2 select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER
+NULL test v2 /* ALGORITHM=UNDEFINED */ select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER
drop view v1, v2;
drop table t1;
drop user mysqltest_1@localhost;
@@ -1182,6 +1182,127 @@ concat(@a, table_name) @a table_name
.t1 . t1
.t2 . t2
drop table t1,t2;
+DROP PROCEDURE IF EXISTS p1;
+DROP FUNCTION IF EXISTS f1;
+CREATE PROCEDURE p1() SET @a= 1;
+CREATE FUNCTION f1() RETURNS INT RETURN @a + 1;
+CREATE USER mysql_bug20230@localhost;
+GRANT EXECUTE ON PROCEDURE p1 TO mysql_bug20230@localhost;
+GRANT EXECUTE ON FUNCTION f1 TO mysql_bug20230@localhost;
+SELECT ROUTINE_NAME, ROUTINE_DEFINITION FROM INFORMATION_SCHEMA.ROUTINES;
+ROUTINE_NAME ROUTINE_DEFINITION
+f1 RETURN @a + 1
+p1 SET @a= 1
+SHOW CREATE PROCEDURE p1;
+Procedure sql_mode Create Procedure
+p1 CREATE DEFINER=`root`@`localhost` PROCEDURE `p1`()
+SET @a= 1
+SHOW CREATE FUNCTION f1;
+Function sql_mode Create Function
+f1 CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
+RETURN @a + 1
+SELECT ROUTINE_NAME, ROUTINE_DEFINITION FROM INFORMATION_SCHEMA.ROUTINES;
+ROUTINE_NAME ROUTINE_DEFINITION
+f1 NULL
+p1 NULL
+SHOW CREATE PROCEDURE p1;
+Procedure sql_mode Create Procedure
+p1 NULL
+SHOW CREATE FUNCTION f1;
+Function sql_mode Create Function
+f1 NULL
+CALL p1();
+SELECT f1();
+f1()
+2
+DROP FUNCTION f1;
+DROP PROCEDURE p1;
+DROP USER mysql_bug20230@localhost;
+SELECT t.table_name, c1.column_name
+FROM information_schema.tables t
+INNER JOIN
+information_schema.columns c1
+ON t.table_schema = c1.table_schema AND
+t.table_name = c1.table_name
+WHERE t.table_schema = 'information_schema' AND
+c1.ordinal_position =
+( SELECT COALESCE(MIN(c2.ordinal_position),1)
+FROM information_schema.columns c2
+WHERE c2.table_schema = t.table_schema AND
+c2.table_name = t.table_name AND
+c2.column_name LIKE '%SCHEMA%'
+ );
+table_name column_name
+CHARACTER_SETS CHARACTER_SET_NAME
+COLLATIONS COLLATION_NAME
+COLLATION_CHARACTER_SET_APPLICABILITY COLLATION_NAME
+COLUMNS TABLE_SCHEMA
+COLUMN_PRIVILEGES TABLE_SCHEMA
+ENGINES ENGINE
+EVENTS EVENT_SCHEMA
+FILES TABLE_SCHEMA
+KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+PARTITIONS TABLE_SCHEMA
+PLUGINS PLUGIN_NAME
+PROCESSLIST ID
+REFERENTIAL_CONSTRAINTS CONSTRAINT_SCHEMA
+ROUTINES ROUTINE_SCHEMA
+SCHEMATA SCHEMA_NAME
+SCHEMA_PRIVILEGES TABLE_SCHEMA
+STATISTICS TABLE_SCHEMA
+TABLES TABLE_SCHEMA
+TABLE_CONSTRAINTS CONSTRAINT_SCHEMA
+TABLE_PRIVILEGES TABLE_SCHEMA
+TRIGGERS TRIGGER_SCHEMA
+USER_PRIVILEGES GRANTEE
+VIEWS TABLE_SCHEMA
+SELECT t.table_name, c1.column_name
+FROM information_schema.tables t
+INNER JOIN
+information_schema.columns c1
+ON t.table_schema = c1.table_schema AND
+t.table_name = c1.table_name
+WHERE t.table_schema = 'information_schema' AND
+c1.ordinal_position =
+( SELECT COALESCE(MIN(c2.ordinal_position),1)
+FROM information_schema.columns c2
+WHERE c2.table_schema = 'information_schema' AND
+c2.table_name = t.table_name AND
+c2.column_name LIKE '%SCHEMA%'
+ );
+table_name column_name
+CHARACTER_SETS CHARACTER_SET_NAME
+COLLATIONS COLLATION_NAME
+COLLATION_CHARACTER_SET_APPLICABILITY COLLATION_NAME
+COLUMNS TABLE_SCHEMA
+COLUMN_PRIVILEGES TABLE_SCHEMA
+ENGINES ENGINE
+EVENTS EVENT_SCHEMA
+FILES TABLE_SCHEMA
+KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+PARTITIONS TABLE_SCHEMA
+PLUGINS PLUGIN_NAME
+PROCESSLIST ID
+REFERENTIAL_CONSTRAINTS CONSTRAINT_SCHEMA
+ROUTINES ROUTINE_SCHEMA
+SCHEMATA SCHEMA_NAME
+SCHEMA_PRIVILEGES TABLE_SCHEMA
+STATISTICS TABLE_SCHEMA
+TABLES TABLE_SCHEMA
+TABLE_CONSTRAINTS CONSTRAINT_SCHEMA
+TABLE_PRIVILEGES TABLE_SCHEMA
+TRIGGERS TRIGGER_SCHEMA
+USER_PRIVILEGES GRANTEE
+VIEWS TABLE_SCHEMA
+SELECT MAX(table_name) FROM information_schema.tables;
+MAX(table_name)
+VIEWS
+SELECT table_name from information_schema.tables
+WHERE table_name=(SELECT MAX(table_name)
+FROM information_schema.tables);
+table_name
+VIEWS
+End of 5.0 tests.
select * from information_schema.engines WHERE ENGINE="MyISAM";
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
MyISAM ENABLED Default engine as of MySQL 3.23 with great performance NO NO NO
@@ -1190,3 +1311,4 @@ select user,db from information_schema.processlist;
user db
user3148 test
drop user user3148@localhost;
+End of 5.1 tests.
diff --git a/mysql-test/r/information_schema_part.result b/mysql-test/r/information_schema_part.result
index 2fd241ed4e4..6ba980e0f21 100644
--- a/mysql-test/r/information_schema_part.result
+++ b/mysql-test/r/information_schema_part.result
@@ -7,9 +7,9 @@ partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
select * from information_schema.partitions where table_schema="test"
and table_name="t1";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t1 x1 NULL 1 NULL LIST NULL b*a NULL 1 0 0 0 # 1024 0 # # NULL NULL default 0 ts1
-NULL test t1 x2 NULL 2 NULL LIST NULL b*a NULL 3,11,5,7 0 0 0 # 1024 0 # # NULL NULL default 0 ts2
-NULL test t1 x3 NULL 3 NULL LIST NULL b*a NULL 16,8,24,27 0 0 0 # 1024 0 # # NULL NULL default 0 ts3
+NULL test t1 x1 NULL 1 NULL LIST NULL b*a NULL 1 0 0 0 # 1024 0 # # NULL NULL default default ts1
+NULL test t1 x2 NULL 2 NULL LIST NULL b*a NULL 3,11,5,7 0 0 0 # 1024 0 # # NULL NULL default default ts2
+NULL test t1 x3 NULL 3 NULL LIST NULL b*a NULL 16,8,24,27 0 0 0 # 1024 0 # # NULL NULL default default ts3
create table t2 (a int not null,b int not null,c int not null, primary key(a,b))
partition by range (a)
partitions 3
@@ -19,27 +19,27 @@ partition x3 values less than maxvalue tablespace ts3);
select * from information_schema.partitions where table_schema="test"
and table_name="t2";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t2 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 1024 0 # # NULL NULL default 0 ts1
-NULL test t2 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 1024 0 # # NULL NULL default 0 ts2
-NULL test t2 x3 NULL 3 NULL RANGE NULL a NULL MAXVALUE 0 0 0 # 1024 0 # # NULL NULL default 0 ts3
+NULL test t2 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 1024 0 # # NULL NULL default default ts1
+NULL test t2 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 1024 0 # # NULL NULL default default ts2
+NULL test t2 x3 NULL 3 NULL RANGE NULL a NULL MAXVALUE 0 0 0 # 1024 0 # # NULL NULL default default ts3
create table t3 (f1 date)
partition by hash(month(f1))
partitions 3;
select * from information_schema.partitions where table_schema="test"
and table_name="t3";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t3 p0 NULL 1 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
-NULL test t3 p1 NULL 2 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
-NULL test t3 p2 NULL 3 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
+NULL test t3 p0 NULL 1 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default
+NULL test t3 p1 NULL 2 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default
+NULL test t3 p2 NULL 3 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default
create table t4 (f1 date, f2 int)
partition by key(f1,f2)
partitions 3;
select * from information_schema.partitions where table_schema="test"
and table_name="t4";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t4 p0 NULL 1 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
-NULL test t4 p1 NULL 2 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
-NULL test t4 p2 NULL 3 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
+NULL test t4 p0 NULL 1 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default
+NULL test t4 p1 NULL 2 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default
+NULL test t4 p2 NULL 3 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default
drop table t1,t2,t3,t4;
create table t1 (a int not null,b int not null,c int not null,primary key (a,b))
partition by range (a)
@@ -63,14 +63,14 @@ subpartition x22 tablespace t2)
);
select * from information_schema.partitions where table_schema="test";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t1 x1 x11 1 1 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 0 t1
-NULL test t1 x1 x12 1 2 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 0 t2
-NULL test t1 x2 x21 2 1 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 0 t1
-NULL test t1 x2 x22 2 2 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 0 t2
-NULL test t2 x1 x11 1 1 RANGE KEY a a 1 0 0 0 # 1024 0 # # NULL NULL default 0 t1
-NULL test t2 x1 x12 1 2 RANGE KEY a a 1 0 0 0 # 1024 0 # # NULL NULL default 0 t2
-NULL test t2 x2 x21 2 1 RANGE KEY a a 5 0 0 0 # 1024 0 # # NULL NULL default 0 t1
-NULL test t2 x2 x22 2 2 RANGE KEY a a 5 0 0 0 # 1024 0 # # NULL NULL default 0 t2
+NULL test t1 x1 x11 1 1 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default default t1
+NULL test t1 x1 x12 1 2 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default default t2
+NULL test t1 x2 x21 2 1 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default default t1
+NULL test t1 x2 x22 2 2 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default default t2
+NULL test t2 x1 x11 1 1 RANGE KEY a a 1 0 0 0 # 1024 0 # # NULL NULL default default t1
+NULL test t2 x1 x12 1 2 RANGE KEY a a 1 0 0 0 # 1024 0 # # NULL NULL default default t2
+NULL test t2 x2 x21 2 1 RANGE KEY a a 5 0 0 0 # 1024 0 # # NULL NULL default default t1
+NULL test t2 x2 x22 2 2 RANGE KEY a a 5 0 0 0 # 1024 0 # # NULL NULL default default t2
drop table t1,t2;
create table t1 (
a int not null,
@@ -99,7 +99,7 @@ drop table t1;
create table t1(f1 int, f2 int);
select * from information_schema.partitions where table_schema="test";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t1 NULL NULL NULL NULL NULL NULL NULL NULL NULL 0 0 0 # 1024 0 # # NULL NULL 0
+NULL test t1 NULL NULL NULL NULL NULL NULL NULL NULL NULL 0 0 0 # 1024 0 # # NULL NULL
drop table t1;
create table t1 (f1 date)
partition by linear hash(month(f1))
@@ -107,9 +107,9 @@ partitions 3;
select * from information_schema.partitions where table_schema="test"
and table_name="t1";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t1 p0 NULL 1 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
-NULL test t1 p1 NULL 2 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
-NULL test t1 p2 NULL 3 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
+NULL test t1 p0 NULL 1 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default
+NULL test t1 p1 NULL 2 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default
+NULL test t1 p2 NULL 3 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default
drop table t1;
create table t1 (a int)
PARTITION BY RANGE (a)
diff --git a/mysql-test/r/init_connect.result b/mysql-test/r/init_connect.result
index eeae422edc4..f90ee5913a1 100644
--- a/mysql-test/r/init_connect.result
+++ b/mysql-test/r/init_connect.result
@@ -22,3 +22,117 @@ set GLOBAL init_connect="adsfsdfsdfs";
select @a;
Got one of the listed errors
drop table t1;
+End of 4.1 tests
+create table t1 (x int);
+insert into t1 values (3), (5), (7);
+create table t2 (y int);
+create user mysqltest1@localhost;
+grant all privileges on test.* to mysqltest1@localhost;
+set global init_connect="create procedure p1() select * from t1";
+call p1();
+x
+3
+5
+7
+drop procedure p1;
+set global init_connect="create procedure p1(x int)\
+begin\
+ select count(*) from t1;\
+ select * from t1;\
+ set @x = x;
+end";
+call p1(42);
+count(*)
+3
+x
+3
+5
+7
+select @x;
+@x
+42
+set global init_connect="call p1(4711)";
+select @x;
+@x
+4711
+set global init_connect="drop procedure if exists p1";
+call p1();
+ERROR 42000: PROCEDURE test.p1 does not exist
+create procedure p1(out sum int)
+begin
+declare n int default 0;
+declare c cursor for select * from t1;
+declare exit handler for not found
+begin
+close c;
+set sum = n;
+end;
+open c;
+loop
+begin
+declare x int;
+fetch c into x;
+if x > 3 then
+set n = n + x;
+end if;
+end;
+end loop;
+end|
+set global init_connect="call p1(@sum)";
+select @sum;
+@sum
+12
+drop procedure p1;
+create procedure p1(tbl char(10), v int)
+begin
+set @s = concat('insert into ', tbl, ' values (?)');
+set @v = v;
+prepare stmt1 from @s;
+execute stmt1 using @v;
+deallocate prepare stmt1;
+end|
+set global init_connect="call p1('t1', 11)";
+select * from t1;
+x
+3
+5
+7
+11
+drop procedure p1;
+create function f1() returns int
+begin
+declare n int;
+select count(*) into n from t1;
+return n;
+end|
+set global init_connect="set @x = f1()";
+select @x;
+@x
+4
+set global init_connect="create view v1 as select f1()";
+select * from v1;
+f1()
+4
+set global init_connect="drop view v1";
+select * from v1;
+ERROR 42S02: Table 'test.v1' doesn't exist
+drop function f1;
+create trigger trg1
+after insert on t2
+for each row
+insert into t1 values (new.y);
+set global init_connect="insert into t2 values (13), (17), (19)";
+select * from t1;
+x
+3
+5
+7
+11
+13
+17
+19
+drop trigger trg1;
+set global init_connect=default;
+revoke all privileges, grant option from mysqltest1@localhost;
+drop user mysqltest1@localhost;
+drop table t1, t2;
diff --git a/mysql-test/r/init_file.result b/mysql-test/r/init_file.result
index 9766475a418..1569f2c3d68 100644
--- a/mysql-test/r/init_file.result
+++ b/mysql-test/r/init_file.result
@@ -1 +1,16 @@
ok
+end of 4.1 tests
+select * from t1;
+x
+3
+5
+7
+11
+13
+select * from t2;
+y
+30
+3
+11
+13
+drop table t1, t2;
diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result
index 72ae29185bc..bc1e85c80f7 100644
--- a/mysql-test/r/innodb.result
+++ b/mysql-test/r/innodb.result
@@ -1473,8 +1473,8 @@ Error 1146 Table 'test.t4' doesn't exist
drop table t1,t2,t3;
create table t1 (id int, name char(10) not null, name2 char(10) not null) engine=innodb;
insert into t1 values(1,'first','fff'),(2,'second','sss'),(3,'third','ttt');
-select name2 from t1 union all select name from t1 union all select id from t1;
-name2
+select trim(name2) from t1 union all select trim(name) from t1 union all select trim(id) from t1;
+trim(name2)
fff
sss
ttt
diff --git a/mysql-test/r/innodb_mysql.result b/mysql-test/r/innodb_mysql.result
index 62e1c2f4019..9f177e99a17 100644
--- a/mysql-test/r/innodb_mysql.result
+++ b/mysql-test/r/innodb_mysql.result
@@ -60,6 +60,35 @@ c.c_id = 218 and expiredate is null;
slai_id
12
drop table t1, t2;
+CREATE TABLE t1 (a int, b int, KEY b (b)) Engine=InnoDB;
+CREATE TABLE t2 (a int, b int, PRIMARY KEY (a,b)) Engine=InnoDB;
+CREATE TABLE t3 (a int, b int, c int, PRIMARY KEY (a),
+UNIQUE KEY b (b,c), KEY a (a,b,c)) Engine=InnoDB;
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 SELECT a + 1, b + 1 FROM t1;
+INSERT INTO t1 SELECT a + 2, b + 2 FROM t1;
+INSERT INTO t2 VALUES (1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(1,7),(1,8);
+INSERT INTO t2 SELECT a + 1, b FROM t2;
+DELETE FROM t2 WHERE a = 1 AND b < 2;
+INSERT INTO t3 VALUES (1,1,1),(2,1,2);
+INSERT INTO t3 SELECT a + 2, a + 2, 3 FROM t3;
+INSERT INTO t3 SELECT a + 4, a + 4, 3 FROM t3;
+SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE
+t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2)
+ORDER BY t1.b LIMIT 2;
+b a
+1 1
+2 2
+SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE
+t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2)
+ORDER BY t1.b LIMIT 5;
+b a
+1 1
+2 2
+2 2
+3 3
+3 3
+DROP TABLE t1, t2, t3;
create table t1m (a int) engine=myisam;
create table t1i (a int) engine=innodb;
create table t2m (a int) engine=myisam;
@@ -332,3 +361,9 @@ ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
SELECT * from t2;
a b
drop table t1,t2;
+create table t1(f1 varchar(800) binary not null, key(f1)) engine = innodb
+character set utf8 collate utf8_general_ci;
+Warnings:
+Warning 1071 Specified key was too long; max key length is 765 bytes
+insert into t1 values('aaa');
+drop table t1;
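The closing innodb_mysql case records the non-fatal path for over-long utf8 keys: an 800-character utf8 column needs far more than InnoDB's 765-byte key limit, so index creation only warns and the insert still succeeds. A hedged sketch of the same sequence (the prefix-index behaviour is an assumption about non-strict sql_mode, not stated in the result file):

create table t1 (f1 varchar(800) binary not null, key(f1))
  engine = innodb character set utf8 collate utf8_general_ci;
-- Warning 1071: Specified key was too long; max key length is 765 bytes
insert into t1 values ('aaa');  -- accepted; the key presumably covers only a prefix
drop table t1;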
diff --git a/mysql-test/r/insert.result b/mysql-test/r/insert.result
index 235c3f61fe9..b090f0f52c0 100644
--- a/mysql-test/r/insert.result
+++ b/mysql-test/r/insert.result
@@ -353,3 +353,18 @@ select row_count();
row_count()
1
drop table t1;
+create table t1 (id int primary key auto_increment, data int, unique(data));
+insert ignore into t1 values(NULL,100),(NULL,110),(NULL,120);
+insert ignore into t1 values(NULL,10),(NULL,20),(NULL,110),(NULL,120),(NULL,100),(NULL,90);
+insert ignore into t1 values(NULL,130),(NULL,140),(500,110),(550,120),(450,100),(NULL,150);
+select * from t1 order by id;
+id data
+1 100
+2 110
+3 120
+4 10
+5 20
+6 90
+7 130
+8 140
+9 150
diff --git a/mysql-test/r/join_outer.result b/mysql-test/r/join_outer.result
index f4ec997e50c..b199ec5b2fb 100644
--- a/mysql-test/r/join_outer.result
+++ b/mysql-test/r/join_outer.result
@@ -1135,25 +1135,6 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 4 Using where
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1
DROP TABLE t1,t2;
-CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20),
-INDEX (name)) ENGINE=InnoDB;
-CREATE TABLE t2 (id int(11) NOT NULL PRIMARY KEY, fkey int(11),
-FOREIGN KEY (fkey) REFERENCES t2(id)) ENGINE=InnoDB;
-INSERT INTO t1 VALUES (1,'A1'),(2,'A2'),(3,'B');
-INSERT INTO t2 VALUES (1,1),(2,2),(3,2),(4,3),(5,3);
-EXPLAIN
-SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
-WHERE t1.name LIKE 'A%';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index PRIMARY,name name 23 NULL 3 Using where; Using index
-1 SIMPLE t2 ref fkey fkey 5 test.t1.id 1 Using where; Using index
-EXPLAIN
-SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
-WHERE t1.name LIKE 'A%' OR FALSE;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index NULL fkey 5 NULL 5 Using index
-1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.fkey 1 Using where
-DROP TABLE t1,t2;
DROP VIEW IF EXISTS v1,v2;
DROP TABLE IF EXISTS t1,t2;
CREATE TABLE t1 (a int);
diff --git a/mysql-test/r/join_outer_innodb.result b/mysql-test/r/join_outer_innodb.result
new file mode 100644
index 00000000000..e8a2d6f668b
--- /dev/null
+++ b/mysql-test/r/join_outer_innodb.result
@@ -0,0 +1,19 @@
+CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20),
+INDEX (name)) ENGINE=InnoDB;
+CREATE TABLE t2 (id int(11) NOT NULL PRIMARY KEY, fkey int(11),
+FOREIGN KEY (fkey) REFERENCES t2(id)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,'A1'),(2,'A2'),(3,'B');
+INSERT INTO t2 VALUES (1,1),(2,2),(3,2),(4,3),(5,3);
+EXPLAIN
+SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
+WHERE t1.name LIKE 'A%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index PRIMARY,name name 23 NULL 3 Using where; Using index
+1 SIMPLE t2 ref fkey fkey 5 test.t1.id 1 Using where; Using index
+EXPLAIN
+SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
+WHERE t1.name LIKE 'A%' OR FALSE;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL fkey 5 NULL 5 Using index
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.fkey 1 Using where
+DROP TABLE t1,t2;
diff --git a/mysql-test/r/lock.result b/mysql-test/r/lock.result
index 079b0253ff6..7cd223197e7 100644
--- a/mysql-test/r/lock.result
+++ b/mysql-test/r/lock.result
@@ -68,3 +68,10 @@ ERROR HY000: Table 't2' was locked with a READ lock and can't be updated
delete t2 from t1,t2 where t1.a=t2.a;
ERROR HY000: Table 't2' was locked with a READ lock and can't be updated
drop table t1,t2;
+drop table if exists t1;
+create table t1 (a int);
+lock table t1 write;
+flush tables with read lock;
+ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+unlock tables;
+drop table t1;
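The lock.result addition captures that FLUSH TABLES WITH READ LOCK is refused while the same session still holds explicit table locks. Reproduced as a plain sketch, assuming a scratch table in the test schema:

create table t1 (a int);
lock table t1 write;
flush tables with read lock;  -- rejected: active locked tables in this session
unlock tables;                -- release the write lock first; FTWRL would then be allowed
drop table t1;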
diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result
index f8cf539bd02..8ff02d898a3 100644
--- a/mysql-test/r/lock_multi.result
+++ b/mysql-test/r/lock_multi.result
@@ -66,6 +66,21 @@ Select_priv
N
use test;
use test;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+ FLUSH TABLES WITH READ LOCK;
+CREATE TABLE t2 (c1 int);
+UNLOCK TABLES;
+UNLOCK TABLES;
+DROP TABLE t1, t2;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+ FLUSH TABLES WITH READ LOCK;
+CREATE TABLE t2 AS SELECT * FROM t1;
+ERROR HY000: Table 't2' was not locked with LOCK TABLES
+UNLOCK TABLES;
+UNLOCK TABLES;
+DROP TABLE t1;
CREATE DATABASE mysqltest_1;
FLUSH TABLES WITH READ LOCK;
DROP DATABASE mysqltest_1;
@@ -80,19 +95,3 @@ lock tables t1 write;
alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; //
unlock tables;
drop table t1;
-use mysql;
-LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE;
-FLUSH TABLES;
-use mysql;
- SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1;
-OPTIMIZE TABLES columns_priv, db, host, user;
-Table Op Msg_type Msg_text
-mysql.columns_priv optimize status OK
-mysql.db optimize status OK
-mysql.host optimize status OK
-mysql.user optimize status OK
-UNLOCK TABLES;
-Select_priv
-N
-use test;
-use test;
diff --git a/mysql-test/r/log_state.result b/mysql-test/r/log_state.result
index 43735243787..0547c5a5bbf 100644
--- a/mysql-test/r/log_state.result
+++ b/mysql-test/r/log_state.result
@@ -102,7 +102,7 @@ show variables like 'general_log_file';
Variable_name Value
general_log_file #
set global general_log= OFF;
-set global general_log_file='/tmp/log.master';
+set global general_log_file='MYSQLTEST_VARDIR/tmp/log.master';
set global general_log= ON;
create table t1(f1 int);
drop table t1;
diff --git a/mysql-test/r/lowercase_fs_off.result b/mysql-test/r/lowercase_fs_off.result
new file mode 100644
index 00000000000..f610b959a47
--- /dev/null
+++ b/mysql-test/r/lowercase_fs_off.result
@@ -0,0 +1,11 @@
+create database d1;
+grant all on d1.* to 'sample'@'localhost' identified by 'password';
+flush privileges;
+select database();
+database()
+d1
+create database d2;
+ERROR 42000: Access denied for user 'sample'@'localhost' to database 'd2'
+create database D1;
+ERROR 42000: Access denied for user 'sample'@'localhost' to database 'D1'
+drop database if exists d1;
diff --git a/mysql-test/r/merge.result b/mysql-test/r/merge.result
index 38ade38747e..b8cdc99abdc 100644
--- a/mysql-test/r/merge.result
+++ b/mysql-test/r/merge.result
@@ -771,14 +771,6 @@ Table Op Msg_type Msg_text
test.t1 check status OK
test.t2 check status OK
drop table t1, t2, t3;
-drop table if exists t1;
-Warnings:
-Note 1051 Unknown table 't1'
-create table t1 (c char(20)) engine=MyISAM;
-insert into t1 values ("Monty"),("WAX"),("Walrus");
-alter table t1 engine=MERGE;
-ERROR HY000: Table storage engine for 't1' doesn't have this option
-drop table t1;
create table t1 (b bit(1));
create table t2 (b bit(1));
create table tm (b bit(1)) engine = merge union = (t1,t2);
diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result
index 8209fd83c69..c4d65d8ceaa 100644
--- a/mysql-test/r/myisam.result
+++ b/mysql-test/r/myisam.result
@@ -1436,12 +1436,27 @@ show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 1 a 1 a A 8 NULL NULL YES BTREE
drop table t1;
+show create table t1;
+show create table t1;
+create table t1 (a int) engine=myisam select 42 a;
+select * from t1;
+a
+9
+select * from t1;
+a
+99
+select * from t1;
+a
+42
+drop table t1;
+End of 4.1 tests
create table t1 (c1 int) engine=myisam pack_keys=0;
create table t2 (c1 int) engine=myisam pack_keys=1;
create table t3 (c1 int) engine=myisam pack_keys=default;
create table t4 (c1 int) engine=myisam pack_keys=2;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '2' at line 1
drop table t1, t2, t3;
+End of 5.0 tests
create table t1 (a int not null, key `a` (a) key_block_size=1024);
show create table t1;
Table Create Table
@@ -1588,3 +1603,4 @@ create table t1 (a int not null, key key_block_size=1024 (a));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '=1024 (a))' at line 1
create table t1 (a int not null, key `a` key_block_size=1024 (a));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'key_block_size=1024 (a))' at line 1
+End of 5.1 tests
diff --git a/mysql-test/r/mysql.result b/mysql-test/r/mysql.result
index 4b7084e813c..d70366a7589 100644
--- a/mysql-test/r/mysql.result
+++ b/mysql-test/r/mysql.result
@@ -36,19 +36,19 @@ Tables_in_test
t1
t2
t3
-
+_
Test delimiter : from command line
a
1
-
+_
Test delimiter :; from command line
a
1
-
+_
Test 'go' command(vertical output) G
*************************** 1. row ***************************
a: 1
-
+_
Test 'go' command g
a
1
diff --git a/mysql-test/r/mysql_client.result b/mysql-test/r/mysql_client.result
new file mode 100644
index 00000000000..87d09428ff6
--- /dev/null
+++ b/mysql-test/r/mysql_client.result
@@ -0,0 +1,4 @@
+1
+1
+ERROR 1064 (42000) at line 3: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1
+ERROR at line 1: USE must be followed by a database name
diff --git a/mysql-test/r/mysqldump-max.result b/mysql-test/r/mysqldump-max.result
index a7d8bf6c0a3..613db96be93 100644
--- a/mysql-test/r/mysqldump-max.result
+++ b/mysql-test/r/mysqldump-max.result
@@ -115,7 +115,6 @@ CREATE TABLE `t1` (
`name` varchar(32) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT DELAYED IGNORE INTO `t1` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
@@ -125,7 +124,6 @@ CREATE TABLE `t2` (
`name` varchar(32) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
INSERT DELAYED IGNORE INTO `t2` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t2` ENABLE KEYS */;
@@ -135,7 +133,6 @@ CREATE TABLE `t3` (
`name` varchar(32) DEFAULT NULL
) ENGINE=MEMORY DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t3` DISABLE KEYS */;
INSERT DELAYED IGNORE INTO `t3` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t3` ENABLE KEYS */;
@@ -145,7 +142,6 @@ CREATE TABLE `t4` (
`name` varchar(32) DEFAULT NULL
) ENGINE=MEMORY DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t4` DISABLE KEYS */;
INSERT DELAYED IGNORE INTO `t4` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t4` ENABLE KEYS */;
@@ -155,7 +151,6 @@ CREATE TABLE `t5` (
`name` varchar(32) DEFAULT NULL
) ENGINE=ARCHIVE DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t5` DISABLE KEYS */;
INSERT DELAYED IGNORE INTO `t5` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t5` ENABLE KEYS */;
@@ -165,7 +160,6 @@ CREATE TABLE `t6` (
`name` varchar(32) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t6` DISABLE KEYS */;
INSERT IGNORE INTO `t6` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t6` ENABLE KEYS */;
@@ -200,7 +194,6 @@ CREATE TABLE `t1` (
`name` varchar(32) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT DELAYED INTO `t1` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
@@ -210,7 +203,6 @@ CREATE TABLE `t2` (
`name` varchar(32) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
INSERT DELAYED INTO `t2` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t2` ENABLE KEYS */;
@@ -220,7 +212,6 @@ CREATE TABLE `t3` (
`name` varchar(32) DEFAULT NULL
) ENGINE=MEMORY DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t3` DISABLE KEYS */;
INSERT DELAYED INTO `t3` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t3` ENABLE KEYS */;
@@ -230,7 +221,6 @@ CREATE TABLE `t4` (
`name` varchar(32) DEFAULT NULL
) ENGINE=MEMORY DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t4` DISABLE KEYS */;
INSERT DELAYED INTO `t4` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t4` ENABLE KEYS */;
@@ -240,7 +230,6 @@ CREATE TABLE `t5` (
`name` varchar(32) DEFAULT NULL
) ENGINE=ARCHIVE DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t5` DISABLE KEYS */;
INSERT DELAYED INTO `t5` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t5` ENABLE KEYS */;
@@ -250,7 +239,6 @@ CREATE TABLE `t6` (
`name` varchar(32) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t6` DISABLE KEYS */;
INSERT INTO `t6` VALUES (1,'first value'),(2,'first value'),(3,'first value'),(4,'first value'),(5,'first value');
/*!40000 ALTER TABLE `t6` ENABLE KEYS */;
diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result
index bf43ad4eff1..3291ebc3c02 100644
--- a/mysql-test/r/mysqldump.result
+++ b/mysql-test/r/mysqldump.result
@@ -77,12 +77,11 @@ CREATE TABLE `t1` (
`b` float DEFAULT NULL
);
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES ('1.23450',2.3456),('1.23450',2.3456),('1.23450',2.3456),('1.23450',2.3456),('1.23450',2.3456);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -175,13 +174,12 @@ CREATE TABLE `t1` (
`a` varchar(255) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=koi8r;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES ('абцде');
INSERT INTO `t1` VALUES (NULL);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -206,12 +204,11 @@ CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) TYPE=MyISAM;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (1),(2);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -230,12 +227,11 @@ CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) TYPE=MyISAM;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (1),(2);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -266,11 +262,10 @@ CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
-UNLOCK TABLES;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -292,11 +287,10 @@ CREATE TABLE "t1" (
"a" int(11) DEFAULT NULL
);
-
-/*!40000 ALTER TABLE "t1" DISABLE KEYS */;
LOCK TABLES "t1" WRITE;
-UNLOCK TABLES;
+/*!40000 ALTER TABLE "t1" DISABLE KEYS */;
/*!40000 ALTER TABLE "t1" ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -321,11 +315,10 @@ CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
-UNLOCK TABLES;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -347,11 +340,10 @@ CREATE TABLE "t1" (
"a" int(11) DEFAULT NULL
);
-
-/*!40000 ALTER TABLE "t1" DISABLE KEYS */;
LOCK TABLES "t1" WRITE;
-UNLOCK TABLES;
+/*!40000 ALTER TABLE "t1" DISABLE KEYS */;
/*!40000 ALTER TABLE "t1" ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -459,12 +451,11 @@ CREATE TABLE `t1` (
`a` char(10) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES ('ÄÖÜß');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -486,12 +477,11 @@ CREATE TABLE `t1` (
`a` char(10) DEFAULT NULL
) TYPE=MyISAM;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES ('Ž™šá');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -510,12 +500,11 @@ CREATE TABLE `t1` (
`a` char(10) DEFAULT NULL
) TYPE=MyISAM;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES ('Ž™šá');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -534,12 +523,11 @@ CREATE TABLE `t1` (
`a` char(10) DEFAULT NULL
) TYPE=MyISAM;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES ('ÄÖÜß');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -568,12 +556,11 @@ CREATE TABLE `t2` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
LOCK TABLES `t2` WRITE;
+/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
INSERT INTO `t2` VALUES (4),(5),(6);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t2` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -604,12 +591,11 @@ CREATE TABLE `t1` (
`b` blob
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (0x602010000280100005E71A);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -640,12 +626,11 @@ CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT IGNORE INTO `t1` VALUES (1),(2),(3),(4),(5),(6);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -672,7 +657,6 @@ CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT DELAYED IGNORE INTO `t1` VALUES (1),(2),(3),(4),(5),(6);
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
@@ -1364,12 +1348,11 @@ CREATE TABLE `t1` (
`F_fe73f687e5bc5280214e0486b273a5f9` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` (`F_c4ca4238a0b923820dcc509a6f75849b`, `F_c81e728d9d4c2f636f067f89cc14862c`, `F_eccbc87e4b5ce2fe28308fd9f2a7baf3`, `F_a87ff679a2f3e71d9181a67b7542122c`, `F_e4da3b7fbbce2345d7772b0674a318d5`, `F_1679091c5a880faf6fb5e6087eb1b2dc`, `F_8f14e45fceea167a5a36dedd4bea2543`, `F_c9f0f895fb98ab9159f51fd0297e236d`, `F_45c48cce2e2d7fbdea1afc51c7c6ad26`, `F_d3d9446802a44259755d38e6d163e820`, `F_6512bd43d9caa6e02c990b0a82652dca`, `F_c20ad4d76fe97759aa27a0c99bff6710`, `F_c51ce410c124a10e0db5e4b97fc2af39`, `F_aab3238922bcc25a6f606eb525ffdc56`, `F_9bf31c7ff062936a96d3c8bd1f8f2ff3`, `F_c74d97b01eae257e44aa9d5bade97baf`, `F_70efdf2ec9b086079795c442636b55fb`, `F_6f4922f45568161a8cdf4ad2299f6d23`, `F_1f0e3dad99908345f7439f8ffabdffc4`, `F_98f13708210194c475687be6106a3b84`, `F_3c59dc048e8850243be8079a5c74d079`, `F_b6d767d2f8ed5d21a44b0e5886680cb9`, `F_37693cfc748049e45d87b8c7d8b9aacd`, `F_1ff1de774005f8da13f42943881c655f`, `F_8e296a067a37563370ded05f5a3bf3ec`, `F_4e732ced3463d06de0ca9a15b6153677`, `F_02e74f10e0327ad868d138f2b4fdd6f0`, `F_33e75ff09dd601bbe69f351039152189`, `F_6ea9ab1baa0efb9e19094440c317e21b`, `F_34173cb38f07f89ddbebc2ac9128303f`, `F_c16a5320fa475530d9583c34fd356ef5`, `F_6364d3f0f495b6ab9dcf8d3b5c6e0b01`, `F_182be0c5cdcd5072bb1864cdee4d3d6e`, `F_e369853df766fa44e1ed0ff613f563bd`, `F_1c383cd30b7c298ab50293adfecb7b18`, `F_19ca14e7ea6328a42e0eb13d585e4c22`, `F_a5bfc9e07964f8dddeb95fc584cd965d`, `F_a5771bce93e200c36f7cd9dfd0e5deaa`, `F_d67d8ab4f4c10bf22aa353e27879133c`, `F_d645920e395fedad7bbbed0eca3fe2e0`, `F_3416a75f4cea9109507cacd8e2f2aefc`, `F_a1d0c6e83f027327d8461063f4ac58a6`, `F_17e62166fc8586dfa4d1bc0e1742c08b`, `F_f7177163c833dff4b38fc8d2872f1ec6`, `F_6c8349cc7260ae62e3b1396831a8398f`, `F_d9d4f495e875a2e075a1a4a6e1b9770f`, `F_67c6a1e7ce56d3d6fa748ab6d9af3fd7`, `F_642e92efb79421734881b53e1e1b18b6`, `F_f457c545a9ded88f18ecee47145a72c0`, `F_c0c7c76d30bd3dcaefc96f40275bdc0a`, `F_2838023a778dfaecdc212708f721b788`, `F_9a1158154dfa42caddbd0694a4e9bdc8`, `F_d82c8d1619ad8176d665453cfb2e55f0`, `F_a684eceee76fc522773286a895bc8436`, `F_b53b3a3d6ab90ce0268229151c9bde11`, `F_9f61408e3afb633e50cdf1b20de6f466`, `F_72b32a1f754ba1c09b3695e0cb6cde7f`, `F_66f041e16a60928b05a7e228a89c3799`, `F_093f65e080a295f8076b1c5722a46aa2`, `F_072b030ba126b2f4b2374f342be9ed44`, `F_7f39f8317fbdb1988ef4c628eba02591`, `F_44f683a84163b3523afe57c2e008bc8c`, `F_03afdbd66e7929b125f8597834fa83a4`, `F_ea5d2f1c4608232e07d3aa3d998e5135`, `F_fc490ca45c00b1249bbe3554a4fdf6fb`, `F_3295c76acbf4caaed33c36b1b5fc2cb1`, `F_735b90b4568125ed6c3f678819b6e058`, `F_a3f390d88e4c41f2747bfa2f1b5f87db`, `F_14bfa6bb14875e45bba028a21ed38046`, `F_7cbbc409ec990f19c78c75bd1e06f215`, `F_e2c420d928d4bf8ce0ff2ec19b371514`, `F_32bb90e8976aab5298d5da10fe66f21d`, `F_d2ddea18f00665ce8623e36bd4e3c7c5`, `F_ad61ab143223efbc24c7d2583be69251`, `F_d09bf41544a3365a46c9077ebb5e35c3`, `F_fbd7939d674997cdb4692d34de8633c4`, `F_28dd2c7955ce926456240b2ff0100bde`, `F_35f4a8d465e6e1edc05f3d8ab658c551`, `F_d1fe173d08e959397adf34b1d77e88d7`, `F_f033ab37c30201f73f142449d037028d`, `F_43ec517d68b6edd3015b3edc9a11367b`, `F_9778d5d219c5080b9a6a17bef029331c`, `F_fe9fc289c3ff0af142b6d3bead98a923`, `F_68d30a9594728bc39aa24be94b319d21`, `F_3ef815416f775098fe977004015c6193`, `F_93db85ed909c13838ff95ccfa94cebd9`, `F_c7e1249ffc03eb9ded908c236bd1996d`, `F_2a38a4a9316c49e5a833517c45d31070`, `F_7647966b7343c29048673252e490f736`, `F_8613985ec49eb8f757ae6439e879bb2a`, `F_54229abfcfa5649e7003b83dd4755294`, `F_92cc227532d17e56e07902b254dfad10`, `F_98dce83da57b0395e163467c9dae521b`, 
`F_f4b9ec30ad9f68f89b29639786cb62ef`, `F_812b4ba287f5ee0bc9d43bbf5bbe87fb`, `F_26657d5ff9020d2abefe558796b99584`, `F_e2ef524fbf3d9fe611d5a8e90fefdc9c`, `F_ed3d2c21991e3bef5e069713af9fa6ca`, `F_ac627ab1ccbdb62ec96e702f07f6425b`, `F_f899139df5e1059396431415e770c6dd`, `F_38b3eff8baf56627478ec76a704e9b52`, `F_ec8956637a99787bd197eacd77acce5e`, `F_6974ce5ac660610b44d9b9fed0ff9548`, `F_c9e1074f5b3f9fc8ea15d152add07294`, `F_65b9eea6e1cc6bb9f0cd2a47751a186f`, `F_f0935e4cd5920aa6c7c996a5ee53a70f`, `F_a97da629b098b75c294dffdc3e463904`, `F_a3c65c2974270fd093ee8a9bf8ae7d0b`, `F_2723d092b63885e0d7c260cc007e8b9d`, `F_5f93f983524def3dca464469d2cf9f3e`, `F_698d51a19d8a121ce581499d7b701668`, `F_7f6ffaa6bb0b408017b62254211691b5`, `F_73278a4a86960eeb576a8fd4c9ec6997`, `F_5fd0b37cd7dbbb00f97ba6ce92bf5add`, `F_2b44928ae11fb9384c4cf38708677c48`, `F_c45147dee729311ef5b5c3003946c48f`, `F_eb160de1de89d9058fcb0b968dbbbd68`, `F_5ef059938ba799aaa845e1c2e8a762bd`, `F_07e1cd7dca89a1678042477183b7ac3f`, `F_da4fb5c6e93e74d3df8527599fa62642`, `F_4c56ff4ce4aaf9573aa5dff913df997a`, `F_a0a080f42e6f13b3a2df133f073095dd`, `F_202cb962ac59075b964b07152d234b70`, `F_c8ffe9a587b126f152ed3d89a146b445`, `F_3def184ad8f4755ff269862ea77393dd`, `F_069059b7ef840f0c74a814ec9237b6ec`, `F_ec5decca5ed3d6b8079e2e7e7bacc9f2`, `F_76dc611d6ebaafc66cc0879c71b5db5c`, `F_d1f491a404d6854880943e5c3cd9ca25`, `F_9b8619251a19057cff70779273e95aa6`, `F_1afa34a7f984eeabdbb0a7d494132ee5`, `F_65ded5353c5ee48d0b7d48c591b8f430`, `F_9fc3d7152ba9336a670e36d0ed79bc43`, `F_02522a2b2726fb0a03bb19f2d8d9524d`, `F_7f1de29e6da19d22b51c68001e7e0e54`, `F_42a0e188f5033bc65bf8d78622277c4e`, `F_3988c7f88ebcb58c6ce932b957b6f332`, `F_013d407166ec4fa56eb1e1f8cbe183b9`, `F_e00da03b685a0dd18fb6a08af0923de0`, `F_1385974ed5904a438616ff7bdb3f7439`, `F_0f28b5d49b3020afeecd95b4009adf4c`, `F_a8baa56554f96369ab93e4f3bb068c22`, `F_903ce9225fca3e988c2af215d4e544d3`, `F_0a09c8844ba8f0936c20bd791130d6b6`, `F_2b24d495052a8ce66358eb576b8912c8`, `F_a5e00132373a7031000fd987a3c9f87b`, `F_8d5e957f297893487bd98fa830fa6413`, `F_47d1e990583c9c67424d369f3414728e`, `F_f2217062e9a397a1dca429e7d70bc6ca`, `F_7ef605fc8dba5425d6965fbd4c8fbe1f`, `F_a8f15eda80c50adb0e71943adc8015cf`, `F_37a749d808e46495a8da1e5352d03cae`, `F_b3e3e393c77e35a4a3f3cbd1e429b5dc`, `F_1d7f7abc18fcb43975065399b0d1e48e`, `F_2a79ea27c279e471f4d180b08d62b00a`, `F_1c9ac0159c94d8d0cbedc973445af2da`, `F_6c4b761a28b734fe93831e3fb400ce87`, `F_06409663226af2f3114485aa4e0a23b4`, `F_140f6969d5213fd0ece03148e62e461e`, `F_b73ce398c39f506af761d2277d853a92`, `F_bd4c9ab730f5513206b999ec0d90d1fb`, `F_82aa4b0af34c2313a562076992e50aa3`, `F_0777d5c17d4066b82ab86dff8a46af6f`, `F_fa7cdfad1a5aaf8370ebeda47a1ff1c3`, `F_9766527f2b5d3e95d4a733fcfb77bd7e`, `F_7e7757b1e12abcb736ab9a754ffb617a`, `F_5878a7ab84fb43402106c575658472fa`, `F_006f52e9102a8d3be2fe5614f42ba989`, `F_3636638817772e42b59d74cff571fbb3`, `F_149e9677a5989fd342ae44213df68868`, `F_a4a042cf4fd6bfb47701cbc8a1653ada`, `F_1ff8a7b5dc7a7d1f0ed65aaa29c04b1e`, `F_f7e6c85504ce6e82442c770f7c8606f0`, `F_bf8229696f7a3bb4700cfddef19fa23f`, `F_82161242827b703e6acf9c726942a1e4`, `F_38af86134b65d0f10fe33d30dd76442e`, `F_96da2f590cd7246bbde0051047b0d6f7`, `F_8f85517967795eeef66c225f7883bdcb`, `F_8f53295a73878494e9bc8dd6c3c7104f`, `F_045117b0e0a11a242b9765e79cbf113f`, `F_fc221309746013ac554571fbd180e1c8`, `F_4c5bde74a8f110656874902f07378009`, `F_cedebb6e872f539bef8c3f919874e9d7`, `F_6cdd60ea0045eb7a6ec44c54d29ed402`, `F_eecca5b6365d9607ee5a9d336962c534`, `F_9872ed9fc22fc182d371c3e9ed316094`, 
`F_31fefc0e570cb3860f2a6d4b38c6490d`, `F_9dcb88e0137649590b755372b040afad`, `F_a2557a7b2e94197ff767970b67041697`, `F_cfecdb276f634854f3ef915e2e980c31`, `F_0aa1883c6411f7873cb83dacb17b0afc`, `F_58a2fc6ed39fd083f55d4182bf88826d`, `F_bd686fd640be98efaae0091fa301e613`, `F_a597e50502f5ff68e3e25b9114205d4a`, `F_0336dcbab05b9d5ad24f4333c7658a0e`, `F_084b6fbb10729ed4da8c3d3f5a3ae7c9`, `F_85d8ce590ad8981ca2c8286f79f59954`, `F_0e65972dce68dad4d52d063967f0a705`, `F_84d9ee44e457ddef7f2c4f25dc8fa865`, `F_3644a684f98ea8fe223c713b77189a77`, `F_757b505cfd34c64c85ca5b5690ee5293`, `F_854d6fae5ee42911677c739ee1734486`, `F_e2c0be24560d78c5e599c2a9c9d0bbd2`, `F_274ad4786c3abca69fa097b85867d9a4`, `F_eae27d77ca20db309e056e3d2dcd7d69`, `F_7eabe3a1649ffa2b3ff8c02ebfd5659f`, `F_69adc1e107f7f7d035d7baf04342e1ca`, `F_091d584fced301b442654dd8c23b3fc9`, `F_b1d10e7bafa4421218a51b1e1f1b0ba2`, `F_6f3ef77ac0e3619e98159e9b6febf557`, `F_eb163727917cbba1eea208541a643e74`, `F_1534b76d325a8f591b52d302e7181331`, `F_979d472a84804b9f647bc185a877a8b5`, `F_ca46c1b9512a7a8315fa3c5a946e8265`, `F_3b8a614226a953a8cd9526fca6fe9ba5`, `F_45fbc6d3e05ebd93369ce542e8f2322d`, `F_63dc7ed1010d3c3b8269faf0ba7491d4`, `F_e96ed478dab8595a7dbda4cbcbee168f`, `F_c0e190d8267e36708f955d7ab048990d`, `F_ec8ce6abb3e952a85b8551ba726a1227`, `F_060ad92489947d410d897474079c1477`, `F_bcbe3365e6ac95ea2c0343a2395834dd`, `F_115f89503138416a242f40fb7d7f338e`, `F_13fe9d84310e77f13a6d184dbf1232f3`, `F_d1c38a09acc34845c6be3a127a5aacaf`, `F_9cfdf10e8fc047a44b08ed031e1f0ed1`, `F_705f2172834666788607efbfca35afb3`, `F_74db120f0a8e5646ef5a30154e9f6deb`, `F_57aeee35c98205091e18d1140e9f38cf`, `F_6da9003b743b65f4c0ccd295cc484e57`, `F_9b04d152845ec0a378394003c96da594`, `F_be83ab3ecd0db773eb2dc1b0a17836a1`, `F_e165421110ba03099a1c0393373c5b43`, `F_289dff07669d7a23de0ef88d2f7129e7`, `F_577ef1154f3240ad5b9b413aa7346a1e`, `F_01161aaa0b6d1345dd8fe4e481144d84`, `F_539fd53b59e3bb12d203f45a912eeaf2`, `F_ac1dd209cbcc5e5d1c6e28598e8cbbe8`, `F_555d6702c950ecb729a966504af0a635`, `F_335f5352088d7d9bf74191e006d8e24c`, `F_f340f1b1f65b6df5b5e3f94d95b11daf`, `F_e4a6222cdb5b34375400904f03d8e6a5`, `F_cb70ab375662576bd1ac5aaf16b3fca4`, `F_9188905e74c28e489b44e954ec0b9bca`, `F_0266e33d3f546cb5436a10798e657d97`, `F_38db3aed920cf82ab059bfccbd02be6a`, `F_3cec07e9ba5f5bb252d13f5f431e4bbb`, `F_621bf66ddb7c962aa0d22ac97d69b793`, `F_077e29b11be80ab57e1a2ecabb7da330`, `F_6c9882bbac1c7093bd25041881277658`, `F_19f3cd308f1455b3fa09a282e0d496f4`, `F_03c6b06952c750899bb03d998e631860`, `F_c24cd76e1ce41366a4bbe8a49b02a028`, `F_c52f1bd66cc19d05628bd8bf27af3ad6`, `F_fe131d7f5a6b38b23cc967316c13dae2`, `F_f718499c1c8cef6730f9fd03c8125cab`, `F_d96409bf894217686ba124d7356686c9`, `F_502e4a16930e414107ee22b6198c578f`, `F_cfa0860e83a4c3a763a7e62d825349f7`, `F_a4f23670e1833f3fdb077ca70bbd5d66`, `F_b1a59b315fc9a3002ce38bbe070ec3f5`, `F_36660e59856b4de58a219bcf4e27eba3`, `F_8c19f571e251e61cb8dd3612f26d5ecf`, `F_d6baf65e0b240ce177cf70da146c8dc8`, `F_e56954b4f6347e897f954495eab16a88`, `F_f7664060cc52bc6f3d620bcedc94a4b6`, `F_eda80a3d5b344bc40f3bc04f65b7a357`, `F_8f121ce07d74717e0b1f21d122e04521`, `F_06138bc5af6023646ede0e1f7c1eac75`, `F_39059724f73a9969845dfe4146c5660e`, `F_7f100b7b36092fb9b06dfb4fac360931`, `F_7a614fd06c325499f1680b9896beedeb`, `F_4734ba6f3de83d861c3176a6273cac6d`, `F_d947bf06a885db0d477d707121934ff8`, `F_63923f49e5241343aa7acb6a06a751e7`, `F_db8e1af0cb3aca1ae2d0018624204529`, `F_20f07591c6fcb220ffe637cda29bb3f6`, `F_07cdfd23373b17c6b337251c22b7ea57`, `F_d395771085aab05244a4fb8fd91bf4ee`, 
`F_92c8c96e4c37100777c7190b76d28233`, `F_e3796ae838835da0b6f6ea37bcf8bcb7`, `F_6a9aeddfc689c1d0e3b9ccc3ab651bc5`, `F_0f49c89d1e7298bb9930789c8ed59d48`, `F_46ba9f2a6976570b0353203ec4474217`, `F_0e01938fc48a2cfb5f2217fbfb00722d`, `F_16a5cdae362b8d27a1d8f8c7b78b4330`, `F_918317b57931b6b7a7d29490fe5ec9f9`, `F_48aedb8880cab8c45637abc7493ecddd`, `F_839ab46820b524afda05122893c2fe8e`, `F_f90f2aca5c640289d0a29417bcb63a37`, `F_9c838d2e45b2ad1094d42f4ef36764f6`, `F_1700002963a49da13542e0726b7bb758`, `F_53c3bce66e43be4f209556518c2fcb54`, `F_6883966fd8f918a4aa29be29d2c386fb`, `F_49182f81e6a13cf5eaa496d51fea6406`, `F_d296c101daa88a51f6ca8cfc1ac79b50`, `F_9fd81843ad7f202f26c1a174c7357585`, `F_26e359e83860db1d11b6acca57d8ea88`, `F_ef0d3930a7b6c95bd2b32ed45989c61f`, `F_94f6d7e04a4d452035300f18b984988c`, `F_34ed066df378efacc9b924ec161e7639`, `F_577bcc914f9e55d5e4e4f82f9f00e7d4`, `F_11b9842e0a271ff252c1903e7132cd68`, `F_37bc2f75bf1bcfe8450a1a41c200364c`, `F_496e05e1aea0a9c4655800e8a7b9ea28`, `F_b2eb7349035754953b57a32e2841bda5`, `F_8e98d81f8217304975ccb23337bb5761`, `F_a8c88a0055f636e4a163a5e3d16adab7`, `F_eddea82ad2755b24c4e168c5fc2ebd40`, `F_06eb61b839a0cefee4967c67ccb099dc`, `F_9dfcd5e558dfa04aaf37f137a1d9d3e5`, `F_950a4152c2b4aa3ad78bdd6b366cc179`, `F_158f3069a435b314a80bdcb024f8e422`, `F_758874998f5bd0c393da094e1967a72b`, `F_ad13a2a07ca4b7642959dc0c4c740ab6`, `F_3fe94a002317b5f9259f82690aeea4cd`, `F_5b8add2a5d98b1a652ea7fd72d942dac`, `F_432aca3a1e345e339f35a30c8f65edce`, `F_8d3bba7425e7c98c50f52ca1b52d3735`, `F_320722549d1751cf3f247855f937b982`, `F_caf1a3dfb505ffed0d024130f58c5cfa`, `F_5737c6ec2e0716f3d8a7a5c4e0de0d9a`, `F_bc6dc48b743dc5d013b1abaebd2faed2`, `F_f2fc990265c712c49d51a18a32b39f0c`, `F_89f0fd5c927d466d6ec9a21b9ac34ffa`, `F_a666587afda6e89aec274a3657558a27`, `F_b83aac23b9528732c23cc7352950e880`, `F_cd00692c3bfe59267d5ecfac5310286c`, `F_6faa8040da20ef399b63a72d0e4ab575`, `F_fe73f687e5bc5280214e0486b273a5f9`) VALUES 
(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,1,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -1405,12 +1388,11 @@ CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (1),(2),(3);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -1451,12 +1433,11 @@ CREATE TABLE `t2` (
KEY `a` (`a`(5))
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
LOCK TABLES `t2` WRITE;
+/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
INSERT INTO `t2` VALUES ('alfred'),('angie'),('bingo'),('waffle'),('lemon');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t2` ENABLE KEYS */;
+UNLOCK TABLES;
DROP TABLE IF EXISTS `v2`;
/*!50001 DROP VIEW IF EXISTS `v2`*/;
/*!50001 CREATE TABLE `v2` (
@@ -1775,7 +1756,6 @@ CREATE TABLE `t2` (
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
drop table t1, t2, t3;
-End of 4.1 tests
create table t1 (a binary(1), b blob);
insert into t1 values ('','');
@@ -1795,12 +1775,11 @@ CREATE TABLE `t1` (
`b` blob
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (0x00,'');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -1828,12 +1807,11 @@ CREATE TABLE `t1` (
`b` blob
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (0x00,'');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -1863,11 +1841,10 @@ CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
-UNLOCK TABLES;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
DROP TABLE IF EXISTS `v1`;
/*!50001 DROP VIEW IF EXISTS `v1`*/;
/*!50001 CREATE TABLE `v1` (
@@ -1919,12 +1896,11 @@ CREATE TABLE `t2` (
KEY `a` (`a`(5))
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
LOCK TABLES `t2` WRITE;
+/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
INSERT INTO `t2` VALUES ('alfred'),('angie'),('bingo'),('waffle'),('lemon');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t2` ENABLE KEYS */;
+UNLOCK TABLES;
DROP TABLE IF EXISTS `v2`;
/*!50001 DROP VIEW IF EXISTS `v2`*/;
/*!50001 CREATE TABLE `v2` (
@@ -1968,12 +1944,11 @@ CREATE TABLE `t1` (
`a` char(10) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES ('\'');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -2011,12 +1986,11 @@ CREATE TABLE `t1` (
`c` varchar(30) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (1,2,'one'),(2,4,'two'),(3,6,'three');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
DROP TABLE IF EXISTS `v1`;
/*!50001 DROP VIEW IF EXISTS `v1`*/;
/*!50001 CREATE TABLE `v1` (
@@ -2128,12 +2102,11 @@ CREATE TABLE `t1` (
`b` bigint(20) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (1,NULL),(2,NULL),(4,NULL),(11,NULL);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!50003 SET @OLD_SQL_MODE=@@SQL_MODE*/;
DELIMITER ;;
@@ -2164,11 +2137,10 @@ CREATE TABLE `t2` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
LOCK TABLES `t2` WRITE;
-UNLOCK TABLES;
+/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
/*!40000 ALTER TABLE `t2` ENABLE KEYS */;
+UNLOCK TABLES;
/*!50003 SET @OLD_SQL_MODE=@@SQL_MODE*/;
DELIMITER ;;
@@ -2212,22 +2184,20 @@ CREATE TABLE `t1` (
`b` bigint(20) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (1,NULL),(2,NULL),(4,NULL),(11,NULL);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
DROP TABLE IF EXISTS `t2`;
CREATE TABLE `t2` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
LOCK TABLES `t2` WRITE;
-UNLOCK TABLES;
+/*!40000 ALTER TABLE `t2` DISABLE KEYS */;
/*!40000 ALTER TABLE `t2` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -2341,12 +2311,11 @@ CREATE TABLE `t1` (
`id` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (1),(2),(3),(4),(5);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
DELIMITER ;;
/*!50003 DROP FUNCTION IF EXISTS `bug9056_func1` */;;
/*!50003 SET SESSION SQL_MODE=""*/;;
@@ -2430,12 +2399,11 @@ CREATE TABLE `t1` (
UNIQUE KEY `d` (`d`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES ('2003-10-25 22:00:00'),('2003-10-25 23:00:00');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -2465,12 +2433,11 @@ CREATE TABLE `t1` (
UNIQUE KEY `d` (`d`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES ('2003-10-26 02:00:00'),('2003-10-26 02:00:00');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
@@ -2512,12 +2479,11 @@ CREATE TABLE "t1 test" (
"a1" int(11) DEFAULT NULL
);
-
-/*!40000 ALTER TABLE "t1 test" DISABLE KEYS */;
LOCK TABLES "t1 test" WRITE;
+/*!40000 ALTER TABLE "t1 test" DISABLE KEYS */;
INSERT INTO "t1 test" VALUES (1),(2),(3);
-UNLOCK TABLES;
/*!40000 ALTER TABLE "t1 test" ENABLE KEYS */;
+UNLOCK TABLES;
/*!50003 SET @OLD_SQL_MODE=@@SQL_MODE*/;
DELIMITER ;;
@@ -2532,12 +2498,11 @@ CREATE TABLE "t2 test" (
"a2" int(11) DEFAULT NULL
);
-
-/*!40000 ALTER TABLE "t2 test" DISABLE KEYS */;
LOCK TABLES "t2 test" WRITE;
+/*!40000 ALTER TABLE "t2 test" DISABLE KEYS */;
INSERT INTO "t2 test" VALUES (1),(2),(3);
-UNLOCK TABLES;
/*!40000 ALTER TABLE "t2 test" ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -2583,12 +2548,11 @@ CREATE TABLE `t1` (
`c` varchar(32) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (1,'first value','xxxx'),(2,'second value','tttt'),(3,'third value','vvv vvv');
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
DROP TABLE IF EXISTS `v0`;
/*!50001 DROP VIEW IF EXISTS `v0`*/;
/*!50001 CREATE TABLE `v0` (
@@ -2661,12 +2625,11 @@ CREATE TABLE `t1` (
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
REPLACE INTO `t1` VALUES (1,1),(2,3),(3,4),(4,5);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
@@ -2707,11 +2670,10 @@ CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
-UNLOCK TABLES;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
/*!50003 SET @OLD_SQL_MODE=@@SQL_MODE*/;
DELIMITER ;;
@@ -2734,6 +2696,7 @@ DELIMITER ;
DROP TRIGGER tr1;
DROP TABLE t1;
+End of 4.1 tests
create table t1 (a int);
insert into t1 values (289), (298), (234), (456), (789);
create definer = CURRENT_USER view v1 as select * from t1;
@@ -2782,6 +2745,25 @@ end AFTER # root@localhost
drop trigger tr1;
drop trigger tr2;
drop table t1, t2;
+create table t (qty int, price int);
+insert into t values(3, 50);
+insert into t values(5, 51);
+create view v1 as select qty, price, qty*price as value from t;
+create view v2 as select qty from v1;
+mysqldump {
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v1` AS select `t`.`qty` AS `qty`,`t`.`price` AS `price`,(`t`.`qty` * `t`.`price`) AS `value` from `t` */;
+
+} mysqldump {
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v2` AS select `v1`.`qty` AS `qty` from `v1` */;
+
+} mysqldump
+drop view v1;
+drop view v2;
+drop table t;
/*!50003 CREATE FUNCTION `f`() RETURNS bigint(20)
return 42 */|
/*!50003 CREATE PROCEDURE `p`()
@@ -2796,6 +2778,135 @@ p CREATE DEFINER=`root`@`localhost` PROCEDURE `p`()
select 42
drop function f;
drop procedure p;
+create table t1 ( id serial );
+create view v1 as select * from t1;
+drop table t1;
+mysqldump {
+
+-- failed on view `v1`: CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`t1`.`id` AS `id` from `t1`
+
+} mysqldump
+drop view v1;
+create database mysqldump_test_db;
+use mysqldump_test_db;
+create table t1 (id int);
+create view v1 as select * from t1;
+insert into t1 values (1232131);
+insert into t1 values (4711);
+insert into t1 values (3231);
+insert into t1 values (0815);
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+/*!40000 DROP DATABASE IF EXISTS `mysqldump_test_db`*/;
+
+CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqldump_test_db` /*!40100 DEFAULT CHARACTER SET latin1 */;
+
+USE `mysqldump_test_db`;
+DROP TABLE IF EXISTS `t1`;
+CREATE TABLE `t1` (
+ `id` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+
+LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
+INSERT INTO `t1` VALUES (1232131),(4711),(3231),(815);
+/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
+DROP TABLE IF EXISTS `v1`;
+/*!50001 DROP VIEW IF EXISTS `v1`*/;
+/*!50001 CREATE TABLE `v1` (
+ `id` int(11)
+) */;
+/*!50001 DROP TABLE IF EXISTS `v1`*/;
+/*!50001 DROP VIEW IF EXISTS `v1`*/;
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v1` AS select `t1`.`id` AS `id` from `t1` */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+drop view v1;
+drop table t1;
+drop database mysqldump_test_db;
+create database mysqldump_tables;
+use mysqldump_tables;
+create table basetable ( id serial, tag varchar(64) );
+create database mysqldump_views;
+use mysqldump_views;
+create view nasishnasifu as select mysqldump_tables.basetable.id from mysqldump_tables.basetable;
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqldump_tables` /*!40100 DEFAULT CHARACTER SET latin1 */;
+
+USE `mysqldump_tables`;
+DROP TABLE IF EXISTS `basetable`;
+CREATE TABLE `basetable` (
+ `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ `tag` varchar(64) DEFAULT NULL,
+ UNIQUE KEY `id` (`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+
+LOCK TABLES `basetable` WRITE;
+/*!40000 ALTER TABLE `basetable` DISABLE KEYS */;
+/*!40000 ALTER TABLE `basetable` ENABLE KEYS */;
+UNLOCK TABLES;
+
+CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqldump_views` /*!40100 DEFAULT CHARACTER SET latin1 */;
+
+USE `mysqldump_views`;
+DROP TABLE IF EXISTS `nasishnasifu`;
+/*!50001 DROP VIEW IF EXISTS `nasishnasifu`*/;
+/*!50001 CREATE TABLE `nasishnasifu` (
+ `id` bigint(20) unsigned
+) */;
+/*!50001 DROP TABLE IF EXISTS `nasishnasifu`*/;
+/*!50001 DROP VIEW IF EXISTS `nasishnasifu`*/;
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `mysqldump_views`.`nasishnasifu` AS select `mysqldump_tables`.`basetable`.`id` AS `id` from `mysqldump_tables`.`basetable` */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+drop view nasishnasifu;
+drop database mysqldump_views;
+drop table mysqldump_tables.basetable;
+drop database mysqldump_tables;
+use test;
+End of 5.0 tests
create table t1 (a text , b text);
create table t2 (a text , b text);
insert t1 values ("Duck, Duck", "goose");
@@ -3038,12 +3149,11 @@ CREATE TABLE `t1` (
`id` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
INSERT INTO `t1` VALUES (1232131),(4711),(3231),(815);
-UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+UNLOCK TABLES;
DROP TABLE IF EXISTS `v1`;
/*!50001 DROP VIEW IF EXISTS `v1`*/;
/*!50001 CREATE TABLE `v1` (
@@ -3067,3 +3177,4 @@ DROP TABLE IF EXISTS `v1`;
drop view v1;
drop table t1;
drop database mysqldump_test_db;
+End of 5.1 tests
diff --git a/mysql-test/r/mysqltest.result b/mysql-test/r/mysqltest.result
index 1a18bf3f110..ef4dd83564b 100644
--- a/mysql-test/r/mysqltest.result
+++ b/mysql-test/r/mysqltest.result
@@ -168,8 +168,8 @@ source database
- most popular open
- source database
- MySQL: The world''s
--- most popular open
--- source database
+-- most popular
+-- open source database
# MySQL: The
--world''s
# most popular
@@ -195,8 +195,8 @@ source database
# source database
-- MySQL: The
-- world''s most
--- popular open
--- source database
+-- popular
+-- open source database
# MySQL: The
- world''s most
-- popular open
diff --git a/mysql-test/r/ndb_binlog_discover.result b/mysql-test/r/ndb_binlog_discover.result
index 2a1bf6efa84..01e15dc1c39 100644
--- a/mysql-test/r/ndb_binlog_discover.result
+++ b/mysql-test/r/ndb_binlog_discover.result
@@ -1,7 +1,6 @@
drop table if exists t1;
create table t1 (a int key) engine=ndb;
reset master;
-insert into t1 values(1);
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
diff --git a/mysql-test/r/ndb_cache_multi.result b/mysql-test/r/ndb_cache_multi.result
index c7135ed9e8a..388131ec30a 100644
--- a/mysql-test/r/ndb_cache_multi.result
+++ b/mysql-test/r/ndb_cache_multi.result
@@ -70,3 +70,5 @@ show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
drop table t1, t2;
+set GLOBAL query_cache_size=0;
+set GLOBAL query_cache_size=0;
diff --git a/mysql-test/r/ndb_dd_advance.result b/mysql-test/r/ndb_dd_advance.result
new file mode 100644
index 00000000000..09fe75805d5
--- /dev/null
+++ b/mysql-test/r/ndb_dd_advance.result
@@ -0,0 +1,1088 @@
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+**** Test Setup Section ****
+CREATE LOGFILE GROUP log_group1
+ADD UNDOFILE './log_group1/undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE table_space1
+ADD DATAFILE './table_space1/datafile.dat'
+USE LOGFILE GROUP log_group1
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1
+(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL)
+TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+CREATE TABLE test.t2
+(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL)
+ENGINE=NDB;
+
+**** Data load for first test ****
+INSERT INTO test.t1 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45),
+(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50),
+(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55),
+(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60),
+(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65),
+(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70),
+(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75);
+INSERT INTO test.t2 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45),
+(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50),
+(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55),
+(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60),
+(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65),
+(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70),
+(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75);
+
+*** Test 1 Section Begins ***
+SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4);
+COUNT(*)
+1
+SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4);
+pk2 b2 c2 pk1 b c
+4 4 4 4 4 4
+SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4);
+COUNT(*)
+1
+SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75);
+COUNT(*)
+1
+SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b;
+b c
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+16 16
+17 17
+18 18
+19 19
+20 20
+21 21
+22 22
+23 23
+24 24
+25 25
+26 26
+27 27
+28 28
+29 29
+30 30
+31 31
+32 32
+33 33
+34 34
+35 35
+36 36
+37 37
+38 38
+39 39
+40 40
+41 41
+42 42
+43 43
+44 44
+45 45
+46 46
+47 47
+48 48
+49 49
+50 50
+51 51
+52 52
+53 53
+54 54
+55 55
+56 56
+57 57
+58 58
+59 59
+60 60
+61 61
+62 62
+63 63
+64 64
+65 65
+66 66
+67 67
+68 68
+69 69
+70 70
+71 71
+72 72
+73 73
+74 74
+75 75
+
+*** Setup for test 2 ****
+DELETE FROM test.t1;
+INSERT INTO test.t1 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45);
+
+**** Test Section 2 ****
+SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b;
+b c
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+16 16
+17 17
+18 18
+19 19
+20 20
+21 21
+22 22
+23 23
+24 24
+25 25
+26 26
+27 27
+28 28
+29 29
+30 30
+31 31
+32 32
+33 33
+34 34
+35 35
+36 36
+37 37
+38 38
+39 39
+40 40
+41 41
+42 42
+43 43
+44 44
+45 45
+SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2;
+COUNT(*)
+45
+SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2;
+COUNT(*)
+75
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `pk2` int(11) NOT NULL,
+ `b2` int(11) NOT NULL,
+ `c2` int(11) NOT NULL,
+ PRIMARY KEY (`pk2`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk1` int(11) NOT NULL,
+ `b` int(11) NOT NULL,
+ `c` int(11) NOT NULL,
+ PRIMARY KEY (`pk1`)
+) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `pk2` int(11) NOT NULL,
+ `b2` int(11) NOT NULL,
+ `c2` int(11) NOT NULL,
+ PRIMARY KEY (`pk2`)
+) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 ENGINE=NDBCLUSTER;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk1` int(11) NOT NULL,
+ `b` int(11) NOT NULL,
+ `c` int(11) NOT NULL,
+ PRIMARY KEY (`pk1`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+*** Setup for Test Section 3 ***
+CREATE TABLE test.t1 (
+usr_id INT unsigned NOT NULL,
+uniq_id INT unsigned NOT NULL AUTO_INCREMENT,
+start_num INT unsigned NOT NULL DEFAULT 1,
+increment INT unsigned NOT NULL DEFAULT 1,
+PRIMARY KEY (uniq_id),
+INDEX usr_uniq_idx (usr_id, uniq_id),
+INDEX uniq_usr_idx (uniq_id, usr_id))
+TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+CREATE TABLE test.t2 (
+id INT unsigned NOT NULL DEFAULT 0,
+usr2_id INT unsigned NOT NULL DEFAULT 0,
+max INT unsigned NOT NULL DEFAULT 0,
+c_amount INT unsigned NOT NULL DEFAULT 0,
+d_max INT unsigned NOT NULL DEFAULT 0,
+d_num INT unsigned NOT NULL DEFAULT 0,
+orig_time INT unsigned NOT NULL DEFAULT 0,
+c_time INT unsigned NOT NULL DEFAULT 0,
+active ENUM ("no","yes") NOT NULL,
+PRIMARY KEY (id,usr2_id),
+INDEX id_idx (id),
+INDEX usr2_idx (usr2_id))
+ENGINE=NDB;
+INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198);
+
+**** Test Section 3 ****
+SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment,
+test.t2.usr2_id,test.t2.c_amount,test.t2.max
+FROM test.t1
+LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id
+WHERE test.t1.uniq_id = 4
+ORDER BY test.t2.c_amount;
+usr_id uniq_id increment usr2_id c_amount max
+3 4 84676 NULL NULL NULL
+INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes');
+INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes');
+INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes');
+SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment,
+test.t2.usr2_id,test.t2.c_amount,test.t2.max
+FROM test.t1
+LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id
+WHERE test.t1.uniq_id = 4
+ORDER BY test.t2.c_amount;
+usr_id uniq_id increment usr2_id c_amount max
+3 4 84676 3 6000 3000
+
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+ALTER TABLESPACE table_space1
+DROP DATAFILE './table_space1/datafile.dat'
+ENGINE = NDB;
+DROP TABLESPACE table_space1
+ENGINE = NDB;
+DROP LOGFILE GROUP log_group1
+ENGINE =NDB;
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLESPACE ts2
+ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int)
+ENGINE=NDB;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES (1,1);
+INSERT INTO t1 VALUES (2,2);
+SELECT * FROM t1 order by a;
+a b
+1 1
+2 2
+INSERT INTO t2(a,b) SELECT * FROM t1;
+SELECT * FROM t2 order by a;
+a b
+1 1
+2 2
+TRUNCATE t1;
+TRUNCATE t2;
+INSERT INTO t2 VALUES (3,3);
+INSERT INTO t2 VALUES (4,4);
+INSERT INTO t1(a,b) SELECT * FROM t2;
+SELECT * FROM t1 order by a;
+a b
+3 3
+4 4
+DROP TABLE t1, t2;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts1 ENGINE NDB;
+ALTER TABLESPACE ts2
+DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts2 ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts
+ADD DATAFILE './datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t (
+a smallint NOT NULL,
+b int NOT NULL,
+c bigint NOT NULL,
+d char(10),
+e TEXT,
+f VARCHAR(255),
+PRIMARY KEY(a)
+) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f);
+SHOW CREATE TABLE test.t;
+Table Create Table
+t CREATE TABLE `t` (
+ `a` smallint(6) NOT NULL,
+ `b` int(11) NOT NULL,
+ `c` bigint(20) NOT NULL,
+ `d` char(10) DEFAULT NULL,
+ `e` text,
+ `f` varchar(255) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `d` (`d`),
+ KEY `f` (`f`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SELECT * FROM test.t order by a;
+a b c d e f
+1 2 3 aaa1 bbb1 ccccc1
+2 3 4 aaa2 bbb2 ccccc2
+3 4 5 aaa3 bbb3 ccccc3
+4 5 6 aaa4 bbb4 ccccc4
+5 6 7 aaa5 bbb5 ccccc5
+6 7 8 aaa6 bbb6 ccccc6
+7 8 9 aaa7 bbb7 ccccc7
+8 9 10 aaa8 bbb8 ccccc8
+9 10 11 aaa9 bbb9 ccccc9
+10 11 12 aaa10 bbb10 ccccc10
+11 12 13 aaa11 bbb11 ccccc11
+12 13 14 aaa12 bbb12 ccccc12
+13 14 15 aaa13 bbb13 ccccc13
+14 15 16 aaa14 bbb14 ccccc14
+15 16 17 aaa15 bbb15 ccccc15
+16 17 18 aaa16 bbb16 ccccc16
+17 18 19 aaa17 bbb17 ccccc17
+18 19 20 aaa18 bbb18 ccccc18
+19 20 21 aaa19 bbb19 ccccc19
+20 21 22 aaa20 bbb20 ccccc20
+21 22 23 aaa21 bbb21 ccccc21
+22 23 24 aaa22 bbb22 ccccc22
+23 24 25 aaa23 bbb23 ccccc23
+24 25 26 aaa24 bbb24 ccccc24
+25 26 27 aaa25 bbb25 ccccc25
+26 27 28 aaa26 bbb26 ccccc26
+27 28 29 aaa27 bbb27 ccccc27
+28 29 30 aaa28 bbb28 ccccc28
+29 30 31 aaa29 bbb29 ccccc29
+30 31 32 aaa30 bbb30 ccccc30
+31 32 33 aaa31 bbb31 ccccc31
+32 33 34 aaa32 bbb32 ccccc32
+33 34 35 aaa33 bbb33 ccccc33
+34 35 36 aaa34 bbb34 ccccc34
+35 36 37 aaa35 bbb35 ccccc35
+36 37 38 aaa36 bbb36 ccccc36
+37 38 39 aaa37 bbb37 ccccc37
+38 39 40 aaa38 bbb38 ccccc38
+39 40 41 aaa39 bbb39 ccccc39
+40 41 42 aaa40 bbb40 ccccc40
+41 42 43 aaa41 bbb41 ccccc41
+42 43 44 aaa42 bbb42 ccccc42
+43 44 45 aaa43 bbb43 ccccc43
+44 45 46 aaa44 bbb44 ccccc44
+45 46 47 aaa45 bbb45 ccccc45
+46 47 48 aaa46 bbb46 ccccc46
+47 48 49 aaa47 bbb47 ccccc47
+48 49 50 aaa48 bbb48 ccccc48
+49 50 51 aaa49 bbb49 ccccc49
+50 51 52 aaa50 bbb50 ccccc50
+51 52 53 aaa51 bbb51 ccccc51
+52 53 54 aaa52 bbb52 ccccc52
+53 54 55 aaa53 bbb53 ccccc53
+54 55 56 aaa54 bbb54 ccccc54
+55 56 57 aaa55 bbb55 ccccc55
+56 57 58 aaa56 bbb56 ccccc56
+57 58 59 aaa57 bbb57 ccccc57
+58 59 60 aaa58 bbb58 ccccc58
+59 60 61 aaa59 bbb59 ccccc59
+60 61 62 aaa60 bbb60 ccccc60
+61 62 63 aaa61 bbb61 ccccc61
+62 63 64 aaa62 bbb62 ccccc62
+63 64 65 aaa63 bbb63 ccccc63
+64 65 66 aaa64 bbb64 ccccc64
+65 66 67 aaa65 bbb65 ccccc65
+66 67 68 aaa66 bbb66 ccccc66
+67 68 69 aaa67 bbb67 ccccc67
+68 69 70 aaa68 bbb68 ccccc68
+69 70 71 aaa69 bbb69 ccccc69
+70 71 72 aaa70 bbb70 ccccc70
+71 72 73 aaa71 bbb71 ccccc71
+72 73 74 aaa72 bbb72 ccccc72
+73 74 75 aaa73 bbb73 ccccc73
+74 75 76 aaa74 bbb74 ccccc74
+75 76 77 aaa75 bbb75 ccccc75
+76 77 78 aaa76 bbb76 ccccc76
+77 78 79 aaa77 bbb77 ccccc77
+78 79 80 aaa78 bbb78 ccccc78
+79 80 81 aaa79 bbb79 ccccc79
+80 81 82 aaa80 bbb80 ccccc80
+81 82 83 aaa81 bbb81 ccccc81
+82 83 84 aaa82 bbb82 ccccc82
+83 84 85 aaa83 bbb83 ccccc83
+84 85 86 aaa84 bbb84 ccccc84
+85 86 87 aaa85 bbb85 ccccc85
+86 87 88 aaa86 bbb86 ccccc86
+87 88 89 aaa87 bbb87 ccccc87
+88 89 90 aaa88 bbb88 ccccc88
+89 90 91 aaa89 bbb89 ccccc89
+90 91 92 aaa90 bbb90 ccccc90
+91 92 93 aaa91 bbb91 ccccc91
+92 93 94 aaa92 bbb92 ccccc92
+93 94 95 aaa93 bbb93 ccccc93
+94 95 96 aaa94 bbb94 ccccc94
+95 96 97 aaa95 bbb95 ccccc95
+96 97 98 aaa96 bbb96 ccccc96
+97 98 99 aaa97 bbb97 ccccc97
+98 99 100 aaa98 bbb98 ccccc98
+99 100 101 aaa99 bbb99 ccccc99
+100 101 102 aaa100 bbb100 ccccc100
+DROP TABLE test.t;
+USE test;
+show tables;
+Tables_in_test
+t
+SELECT * FROM test.t order by a;
+a b c d e f
+1 2 3 aaa1 bbb1 ccccc1
+2 3 4 aaa2 bbb2 ccccc2
+3 4 5 aaa3 bbb3 ccccc3
+4 5 6 aaa4 bbb4 ccccc4
+5 6 7 aaa5 bbb5 ccccc5
+6 7 8 aaa6 bbb6 ccccc6
+7 8 9 aaa7 bbb7 ccccc7
+8 9 10 aaa8 bbb8 ccccc8
+9 10 11 aaa9 bbb9 ccccc9
+10 11 12 aaa10 bbb10 ccccc10
+11 12 13 aaa11 bbb11 ccccc11
+12 13 14 aaa12 bbb12 ccccc12
+13 14 15 aaa13 bbb13 ccccc13
+14 15 16 aaa14 bbb14 ccccc14
+15 16 17 aaa15 bbb15 ccccc15
+16 17 18 aaa16 bbb16 ccccc16
+17 18 19 aaa17 bbb17 ccccc17
+18 19 20 aaa18 bbb18 ccccc18
+19 20 21 aaa19 bbb19 ccccc19
+20 21 22 aaa20 bbb20 ccccc20
+21 22 23 aaa21 bbb21 ccccc21
+22 23 24 aaa22 bbb22 ccccc22
+23 24 25 aaa23 bbb23 ccccc23
+24 25 26 aaa24 bbb24 ccccc24
+25 26 27 aaa25 bbb25 ccccc25
+26 27 28 aaa26 bbb26 ccccc26
+27 28 29 aaa27 bbb27 ccccc27
+28 29 30 aaa28 bbb28 ccccc28
+29 30 31 aaa29 bbb29 ccccc29
+30 31 32 aaa30 bbb30 ccccc30
+31 32 33 aaa31 bbb31 ccccc31
+32 33 34 aaa32 bbb32 ccccc32
+33 34 35 aaa33 bbb33 ccccc33
+34 35 36 aaa34 bbb34 ccccc34
+35 36 37 aaa35 bbb35 ccccc35
+36 37 38 aaa36 bbb36 ccccc36
+37 38 39 aaa37 bbb37 ccccc37
+38 39 40 aaa38 bbb38 ccccc38
+39 40 41 aaa39 bbb39 ccccc39
+40 41 42 aaa40 bbb40 ccccc40
+41 42 43 aaa41 bbb41 ccccc41
+42 43 44 aaa42 bbb42 ccccc42
+43 44 45 aaa43 bbb43 ccccc43
+44 45 46 aaa44 bbb44 ccccc44
+45 46 47 aaa45 bbb45 ccccc45
+46 47 48 aaa46 bbb46 ccccc46
+47 48 49 aaa47 bbb47 ccccc47
+48 49 50 aaa48 bbb48 ccccc48
+49 50 51 aaa49 bbb49 ccccc49
+50 51 52 aaa50 bbb50 ccccc50
+51 52 53 aaa51 bbb51 ccccc51
+52 53 54 aaa52 bbb52 ccccc52
+53 54 55 aaa53 bbb53 ccccc53
+54 55 56 aaa54 bbb54 ccccc54
+55 56 57 aaa55 bbb55 ccccc55
+56 57 58 aaa56 bbb56 ccccc56
+57 58 59 aaa57 bbb57 ccccc57
+58 59 60 aaa58 bbb58 ccccc58
+59 60 61 aaa59 bbb59 ccccc59
+60 61 62 aaa60 bbb60 ccccc60
+61 62 63 aaa61 bbb61 ccccc61
+62 63 64 aaa62 bbb62 ccccc62
+63 64 65 aaa63 bbb63 ccccc63
+64 65 66 aaa64 bbb64 ccccc64
+65 66 67 aaa65 bbb65 ccccc65
+66 67 68 aaa66 bbb66 ccccc66
+67 68 69 aaa67 bbb67 ccccc67
+68 69 70 aaa68 bbb68 ccccc68
+69 70 71 aaa69 bbb69 ccccc69
+70 71 72 aaa70 bbb70 ccccc70
+71 72 73 aaa71 bbb71 ccccc71
+72 73 74 aaa72 bbb72 ccccc72
+73 74 75 aaa73 bbb73 ccccc73
+74 75 76 aaa74 bbb74 ccccc74
+75 76 77 aaa75 bbb75 ccccc75
+76 77 78 aaa76 bbb76 ccccc76
+77 78 79 aaa77 bbb77 ccccc77
+78 79 80 aaa78 bbb78 ccccc78
+79 80 81 aaa79 bbb79 ccccc79
+80 81 82 aaa80 bbb80 ccccc80
+81 82 83 aaa81 bbb81 ccccc81
+82 83 84 aaa82 bbb82 ccccc82
+83 84 85 aaa83 bbb83 ccccc83
+84 85 86 aaa84 bbb84 ccccc84
+85 86 87 aaa85 bbb85 ccccc85
+86 87 88 aaa86 bbb86 ccccc86
+87 88 89 aaa87 bbb87 ccccc87
+88 89 90 aaa88 bbb88 ccccc88
+89 90 91 aaa89 bbb89 ccccc89
+90 91 92 aaa90 bbb90 ccccc90
+91 92 93 aaa91 bbb91 ccccc91
+92 93 94 aaa92 bbb92 ccccc92
+93 94 95 aaa93 bbb93 ccccc93
+94 95 96 aaa94 bbb94 ccccc94
+95 96 97 aaa95 bbb95 ccccc95
+96 97 98 aaa96 bbb96 ccccc96
+97 98 99 aaa97 bbb97 ccccc97
+98 99 100 aaa98 bbb98 ccccc98
+99 100 101 aaa99 bbb99 ccccc99
+100 101 102 aaa100 bbb100 ccccc100
+DROP TABLE test.t;
+ALTER TABLESPACE ts
+DROP DATAFILE './datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+DROP table IF EXISTS test.t1;
+Warnings:
+Note 1051 Unknown table 't1'
+DROP table IF EXISTS test.t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLESPACE ts2
+ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (
+a1 smallint NOT NULL,
+a2 int NOT NULL,
+a3 bigint NOT NULL,
+a4 char(10),
+a5 decimal(5,1),
+a6 time,
+a7 date,
+a8 datetime,
+a9 VARCHAR(255),
+a10 blob,
+PRIMARY KEY(a1)
+) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8);
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` smallint(6) NOT NULL,
+ `a2` int(11) NOT NULL,
+ `a3` bigint(20) NOT NULL,
+ `a4` char(10) DEFAULT NULL,
+ `a5` decimal(5,1) DEFAULT NULL,
+ `a6` time DEFAULT NULL,
+ `a7` date DEFAULT NULL,
+ `a8` datetime DEFAULT NULL,
+ `a9` varchar(255) DEFAULT NULL,
+ `a10` blob,
+ PRIMARY KEY (`a1`),
+ KEY `a2` (`a2`),
+ KEY `a3` (`a3`),
+ KEY `a8` (`a8`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+CREATE TABLE test.t2 (
+b1 smallint NOT NULL,
+b2 int NOT NULL,
+b3 bigint NOT NULL,
+b4 char(10),
+b5 decimal(5,1),
+b6 time,
+b7 date,
+b8 datetime,
+b9 VARCHAR(255),
+b10 blob,
+PRIMARY KEY(b1)
+) ENGINE=NDB;
+ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8);
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `b1` smallint(6) NOT NULL,
+ `b2` int(11) NOT NULL,
+ `b3` bigint(20) NOT NULL,
+ `b4` char(10) DEFAULT NULL,
+ `b5` decimal(5,1) DEFAULT NULL,
+ `b6` time DEFAULT NULL,
+ `b7` date DEFAULT NULL,
+ `b8` datetime DEFAULT NULL,
+ `b9` varchar(255) DEFAULT NULL,
+ `b10` blob,
+ PRIMARY KEY (`b1`),
+ KEY `b2` (`b2`),
+ KEY `b3` (`b3`),
+ KEY `b8` (`b8`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SELECT * FROM test.t1 order by a1;
+a1 a2 a3 a4 a5 a6 a7 a8 a9 a10
+1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data
+2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data
+3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data
+4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data
+5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data
+6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data
+7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data
+8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data
+9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data
+10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data
+11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data
+12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data
+13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data
+14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data
+15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data
+16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data
+17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data
+18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data
+19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data
+20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data
+SELECT * FROM test.t2 order by b1;
+b1 b2 b3 b4 b5 b6 b7 b8 b9 b10
+3 4 3000000001 aaa1 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data
+4 5 3000000002 aaa2 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data
+5 6 3000000003 aaa3 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data
+6 7 3000000004 aaa4 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data
+7 8 3000000005 aaa5 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data
+8 9 3000000006 aaa6 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data
+9 10 3000000007 aaa7 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data
+10 11 3000000008 aaa8 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data
+11 12 3000000009 aaa9 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data
+12 13 3000000010 aaa10 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data
+13 14 3000000011 aaa11 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data
+14 15 3000000012 aaa12 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data
+15 16 3000000013 aaa13 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data
+16 17 3000000014 aaa14 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data
+17 18 3000000015 aaa15 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data
+18 19 3000000016 aaa16 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data
+19 20 3000000017 aaa17 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data
+20 21 3000000018 aaa18 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data
+21 22 3000000019 aaa19 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data
+22 23 3000000020 aaa20 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data
+SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1;
+COUNT(a1) a1 COUNT(a1)*a1
+1 1 1
+1 2 2
+1 3 3
+1 4 4
+1 5 5
+1 6 6
+1 7 7
+1 8 8
+1 9 9
+1 10 10
+1 11 11
+1 12 12
+1 13 13
+1 14 14
+1 15 15
+1 16 16
+1 17 17
+1 18 18
+1 19 19
+1 20 20
+SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2;
+COUNT(a2) (a2+1) COUNT(a2)*(a2+0)
+1 3 2
+1 4 3
+1 5 4
+1 6 5
+1 7 6
+1 8 7
+1 9 8
+1 10 9
+1 11 10
+1 12 11
+1 13 12
+1 14 13
+1 15 14
+1 16 15
+1 17 16
+1 18 17
+1 19 18
+1 20 19
+1 21 20
+1 22 21
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+Warnings:
+Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK'
+insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4);
+select distinct a from test.t1 group by b,a having a > 2 order by a desc;
+a
+4
+3
+select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc;
+a c
+4 NULL
+3 NULL
+select distinct a from test.t1 group by b,a having a > 2 order by a asc;
+a
+3
+4
+select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc;
+a c
+3 NULL
+4 NULL
+drop table test.t1;
+create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3');
+select * from test.t1 where a >= '1' order by a;
+a
+1
+1
+1
+2
+2
+3
+3
+select distinct a from test.t1 order by a desc;
+a
+3
+2
+1
+select distinct a from test.t1 where a >= '1' order by a desc;
+a
+3
+2
+1
+select distinct a from test.t1 where a >= '1' order by a asc;
+a
+1
+2
+3
+drop table test.t1;
+CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB;
+INSERT INTO test.t1 (email, infoID, dateentered) VALUES
+('test1@testdomain.com', 1, '2002-07-30 22:56:38'),
+('test1@testdomain.com', 1, '2002-07-27 22:58:16'),
+('test2@testdomain.com', 1, '2002-06-19 15:22:19'),
+('test2@testdomain.com', 2, '2002-06-18 14:23:47'),
+('test3@testdomain.com', 1, '2002-05-19 22:17:32');
+INSERT INTO test.t2(infoID, shipcode) VALUES
+(1, 'Z001'),
+(2, 'R002');
+SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode;
+email shipcode
+test1@testdomain.com Z001
+test2@testdomain.com R002
+test2@testdomain.com Z001
+test3@testdomain.com Z001
+SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC;
+email
+test1@testdomain.com
+test2@testdomain.com
+test3@testdomain.com
+SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC;
+email shipcode
+test1@testdomain.com Z001
+test2@testdomain.com Z001
+test2@testdomain.com R002
+test3@testdomain.com Z001
+drop table test.t1,test.t2;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts1 ENGINE NDB;
+ALTER TABLESPACE ts2
+DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts2 ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+DROP TABLE IF EXISTS test.t;
+Warnings:
+Note 1051 Unknown table 't'
+create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB;
+insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1);
+insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2);
+select * from test.t order by f1;
+f1 f2 f3
+111111 aaaaaa 1
+222222 bbbbbb 2
+select f1,f2 from test.t order by f2;
+f1 f2
+111111 aaaaaa
+222222 bbbbbb
+select f2 from test.t order by f2;
+f2
+aaaaaa
+bbbbbb
+select f1,f2 from test.t order by f1;
+f1 f2
+111111 aaaaaa
+222222 bbbbbb
+drop table test.t;
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts
+ADD DATAFILE './table_space/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB;
+Warnings:
+Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK'
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB;
+Warnings:
+Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK'
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` blob,
+ `a3` text,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 ENGINE=InnoDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` blob,
+ `a3` text,
+ PRIMARY KEY (`a1`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+Warnings:
+Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK'
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 ENGINE=MyISAM;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` float DEFAULT NULL,
+ `a3` double DEFAULT NULL,
+ `a4` bit(1) DEFAULT NULL,
+ `a5` tinyint(4) DEFAULT NULL,
+ `a6` bigint(20) DEFAULT NULL,
+ `a7` date DEFAULT NULL,
+ `a8` time DEFAULT NULL,
+ `a9` datetime DEFAULT NULL,
+ `a10` tinytext,
+ `a11` mediumtext,
+ `a12` longtext,
+ `a13` text,
+ `a14` blob,
+ PRIMARY KEY (`a1`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6),
+ADD INDEX (a7), ADD INDEX (a8);
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` float DEFAULT NULL,
+ `a3` double DEFAULT NULL,
+ `a4` bit(1) DEFAULT NULL,
+ `a5` tinyint(4) DEFAULT NULL,
+ `a6` bigint(20) DEFAULT NULL,
+ `a7` date DEFAULT NULL,
+ `a8` time DEFAULT NULL,
+ `a9` datetime DEFAULT NULL,
+ `a10` tinytext,
+ `a11` mediumtext,
+ `a12` longtext,
+ `a13` text,
+ `a14` blob,
+ PRIMARY KEY (`a1`),
+ KEY `a2` (`a2`),
+ KEY `a3` (`a3`),
+ KEY `a5` (`a5`),
+ KEY `a6` (`a6`),
+ KEY `a7` (`a7`),
+ KEY `a8` (`a8`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` float DEFAULT NULL,
+ `a3` double DEFAULT NULL,
+ `a4` bit(1) DEFAULT NULL,
+ `a5` tinyint(4) DEFAULT NULL,
+ `a6` bigint(20) DEFAULT NULL,
+ `a7` date DEFAULT NULL,
+ `a8` time DEFAULT NULL,
+ `a9` datetime DEFAULT NULL,
+ `a10` tinytext,
+ `a11` mediumtext,
+ `a12` longtext,
+ `a13` text,
+ `a14` blob,
+ PRIMARY KEY (`a1`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 DROP a14;
+ALTER TABLE test.t1 DROP a13;
+ALTER TABLE test.t1 DROP a12;
+ALTER TABLE test.t1 DROP a11;
+ALTER TABLE test.t1 DROP a10;
+ALTER TABLE test.t1 DROP a9;
+ALTER TABLE test.t1 DROP a8;
+ALTER TABLE test.t1 DROP a7;
+ALTER TABLE test.t1 DROP a6;
+ALTER TABLE test.t1 DROP PRIMARY KEY;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` float DEFAULT NULL,
+ `a3` double DEFAULT NULL,
+ `a4` bit(1) DEFAULT NULL,
+ `a5` tinyint(4) DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+ALTER TABLESPACE ts
+DROP DATAFILE './table_space/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
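For readers skimming the expected output above rather than running it, the disk-data objects follow a fixed lifecycle: the LOGFILE GROUP must exist before a TABLESPACE can reference it, a table opts into disk storage with TABLESPACE ... STORAGE DISK, and tear-down runs strictly in reverse. A minimal sketch of that lifecycle is below; the object names (lg, ts, t_disk) and file names are illustrative stand-ins for the ones the test uses, and, as the 1465 warnings above suggest, a blob column without a primary key cannot be binlogged for such a table.

-- Minimal NDB disk-data lifecycle mirroring the statements exercised above.
CREATE LOGFILE GROUP lg
  ADD UNDOFILE 'undofile.dat'
  INITIAL_SIZE 16M
  UNDO_BUFFER_SIZE = 1M
  ENGINE NDB;
CREATE TABLESPACE ts
  ADD DATAFILE 'datafile.dat'
  USE LOGFILE GROUP lg
  INITIAL_SIZE 12M
  ENGINE NDB;
CREATE TABLE test.t_disk (pk INT NOT NULL PRIMARY KEY, payload BLOB)
  TABLESPACE ts STORAGE DISK ENGINE=NDB;
-- Tear-down in reverse order: table, datafile, tablespace, logfile group.
DROP TABLE test.t_disk;
ALTER TABLESPACE ts DROP DATAFILE 'datafile.dat' ENGINE NDB;
DROP TABLESPACE ts ENGINE NDB;
DROP LOGFILE GROUP lg ENGINE NDB;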
diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result
new file mode 100644
index 00000000000..c7fcda650e6
--- /dev/null
+++ b/mysql-test/r/ndb_dd_advance2.result
@@ -0,0 +1,746 @@
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+DROP TABLE IF EXISTS test.t3;
+*****
+**** Copy data from table in one table space to table in different table space
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLESPACE ts2
+ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts2 STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` varchar(256) DEFAULT NULL,
+ `a3` blob,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a1` int(11) NOT NULL,
+ `a2` varchar(256) DEFAULT NULL,
+ `a3` blob,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa');
+INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb');
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 111111 aaaaaaaa
+2 222222 bbbbbbbb
+INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1;
+SELECT * FROM test.t2 ORDER BY a1;
+a1 a2 a3
+1 111111 aaaaaaaa
+2 222222 bbbbbbbb
+DROP TABLE test.t1, test.t2;
+set @vc1 = repeat('a', 200);
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('c', 1000);
+set @vc4 = repeat('d', 4000);
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @b2 = 'b2';
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @d2 = 'dd2';
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+select length(@x0),length(@b1),length(@d1) from dual;
+length(@x0) length(@b1) length(@d1)
+256 2256 3000
+select length(@x0),length(@b2),length(@d2) from dual;
+length(@x0) length(@b2) length(@d2)
+256 20000 30000
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB)
+TABLESPACE ts2 STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` varchar(5000) DEFAULT NULL,
+ `a3` blob,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a1` int(11) NOT NULL,
+ `a2` varchar(5000) DEFAULT NULL,
+ `a3` blob,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+INSERT INTO test.t1 VALUES (1,@vc1,@d1);
+INSERT INTO test.t1 VALUES (2,@vc2,@b1);
+INSERT INTO test.t1 VALUES (3,@vc3,@d2);
+INSERT INTO test.t1 VALUES (4,@vc4,@b2);
+SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3)
+FROM test.t1 WHERE a1=1;
+a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3)
+1 200 aa 3000 dd1
+SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3)
+FROM test.t1 where a1=2;
+a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3)
+2 500 bb 2256 b1b
+INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1;
+SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3)
+FROM test.t2 WHERE a1=1;
+a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3)
+1 200 aa 3000 dd1
+SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3)
+FROM test.t2 where a1=2;
+a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3)
+2 500 bb 2256 b1b
+DROP TABLE test.t1, test.t2;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts1 ENGINE NDB;
+ALTER TABLESPACE ts2
+DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts2 ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+**** Insert, Update, Delete from NDB table with BLOB fields
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+set @vc1 = repeat('a', 200);
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('c', 1000);
+set @vc4 = repeat('d', 4000);
+set @vc5 = repeat('d', 5000);
+set @bb1 = repeat('1', 2000);
+set @bb2 = repeat('2', 5000);
+set @bb3 = repeat('3', 10000);
+set @bb4 = repeat('4', 40000);
+set @bb5 = repeat('5', 50000);
+select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual;
+length(@vc1) length(@vc2) length(@vc3) length(@vc4) length(@vc5)
+200 500 1000 4000 5000
+select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual;
+length(@bb1) length(@bb2) length(@bb3) length(@bb4) length(@bb5)
+2000 5000 10000 40000 50000
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+INSERT INTO test.t1 VALUES (1,@vc1,@bb1);
+INSERT INTO test.t1 VALUES (2,@vc2,@bb2);
+INSERT INTO test.t1 VALUES (3,@vc3,@bb3);
+INSERT INTO test.t1 VALUES (4,@vc4,@bb4);
+INSERT INTO test.t1 VALUES (5,@vc5,@bb5);
+UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1;
+SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3)
+FROM test.t1 WHERE a1=1;
+a1 length(a2) substr(a2,4998,2) length(a3) substr(a3,49997,3)
+1 5000 dd 50000 555
+UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2;
+SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3)
+FROM test.t1 WHERE a1=2;
+a1 length(a2) substr(a2,3998,2) length(a3) substr(a3,39997,3)
+2 4000 dd 40000 444
+UPDATE test.t1 SET a2=@vc2, a3=@bb2 WHERE a1=3;
+SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3)
+FROM test.t1 WHERE a1=3;
+a1 length(a2) substr(a2,498,2) length(a3) substr(a3,3997,3)
+3 500 bb 5000 222
+UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4;
+SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3)
+FROM test.t1 WHERE a1=4;
+a1 length(a2) substr(a2,998,2) length(a3) substr(a3,9997,3)
+4 1000 cc 10000 333
+UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5;
+SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3)
+FROM test.t1 WHERE a1=5;
+a1 length(a2) substr(a2,198,2) length(a3) substr(a3,1997,3)
+5 200 aa 2000 111
+DELETE FROM test.t1 where a1=5;
+SELECT count(*) from test.t1;
+count(*)
+4
+DELETE FROM test.t1 where a1=4;
+SELECT count(*) from test.t1;
+count(*)
+3
+DELETE FROM test.t1 where a1=3;
+SELECT count(*) from test.t1;
+count(*)
+2
+DELETE FROM test.t1 where a1=2;
+SELECT count(*) from test.t1;
+count(*)
+1
+DELETE FROM test.t1 where a1=1;
+SELECT count(*) from test.t1;
+count(*)
+0
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts1 ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+**** Create Stored procedures that use disk based tables
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB//
+CREATE PROCEDURE test.sp1()
+BEGIN
+INSERT INTO test.t1 values (1,'111111','aaaaaaaa');
+END//
+CALL test.sp1();
+SELECT * FROM test.t1;
+a1 a2 a3
+1 111111 aaaaaaaa
+CREATE PROCEDURE test.sp2(n INT, vc VARCHAR(256), blb BLOB)
+BEGIN
+UPDATE test.t1 SET a2=vc, a3=blb where a1=n;
+END//
+CALL test.sp2(1,'222222','bbbbbbbb');
+SELECT * FROM test.t1;
+a1 a2 a3
+1 222222 bbbbbbbb
+DELETE FROM test.t1;
+DROP PROCEDURE test.sp1;
+DROP PROCEDURE test.sp2;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create function that operate on disk based tables
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE FUNCTION test.fn1(n INT) RETURNS INT
+BEGIN
+DECLARE v INT;
+SELECT a1 INTO v FROM test.t1 WHERE a1=n;
+RETURN v;
+END//
+CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB
+BEGIN
+DECLARE vv BLOB;
+UPDATE test.t1 SET a3=blb where a1=n;
+SELECT a3 INTO vv FROM test.t1 WHERE a1=n;
+RETURN vv;
+END//
+SELECT test.fn1(10) FROM DUAL;
+test.fn1(10)
+10
+SELECT test.fn2(50, 'new BLOB content') FROM DUAL;
+test.fn2(50, 'new BLOB content')
+new BLOB content
+DELETE FROM test.t1;
+DROP FUNCTION test.fn1;
+DROP FUNCTION test.fn2;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create triggers that operate on disk based tables
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW
+BEGIN
+if isnull(new.a2) then
+set new.a2:= 'trg1 works on a2 field';
+end if;
+if isnull(new.a3) then
+set new.a3:= 'trg1 works on a3 field';
+end if;
+end//
+insert into test.t1 (a1) values (1)//
+insert into test.t1 (a1,a2) values (2, 'ccccccc')//
+select * from test.t1 order by a1//
+a1 a2 a3
+1 trg1 works on a2 field trg1 works on a3 field
+2 ccccccc trg1 works on a3 field
+DELETE FROM test.t1;
+DROP TRIGGER test.trg1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create, update views that operate on disk based tables
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE VIEW test.v1 AS SELECT * FROM test.t1;
+SELECT * FROM test.v1 order by a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+3 aaaaa3 bbbbb3
+4 aaaaa4 bbbbb4
+5 aaaaa5 bbbbb5
+6 aaaaa6 bbbbb6
+7 aaaaa7 bbbbb7
+8 aaaaa8 bbbbb8
+9 aaaaa9 bbbbb9
+10 aaaaa10 bbbbb10
+CHECK TABLE test.v1, test.t1;
+Table Op Msg_type Msg_text
+test.v1 check status OK
+test.t1 check note The storage engine for the table doesn't support check
+UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5;
+SELECT * FROM test.v1 order by a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+3 aaaaa3 bbbbb3
+4 aaaaa4 bbbbb4
+5 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz bbbbb5
+6 aaaaa6 bbbbb6
+7 aaaaa7 bbbbb7
+8 aaaaa8 bbbbb8
+9 aaaaa9 bbbbb9
+10 aaaaa10 bbbbb10
+DROP VIEW test.v1;
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create and use disk based table that use auto inc
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa10 bbbbb10
+2 aaaaa9 bbbbb9
+3 aaaaa8 bbbbb8
+4 aaaaa7 bbbbb7
+5 aaaaa6 bbbbb6
+6 aaaaa5 bbbbb5
+7 aaaaa4 bbbbb4
+8 aaaaa3 bbbbb3
+9 aaaaa2 bbbbb2
+10 aaaaa1 bbbbb1
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create test that use transaction (commit, rollback)
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+SET AUTOCOMMIT=0;
+CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+COMMIT;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ROLLBACK;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+SET AUTOCOMMIT=1;
+CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+START TRANSACTION;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+COMMIT;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+START TRANSACTION;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ROLLBACK;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create test that uses locks
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+drop table if exists test.t1;
+CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+LOCK TABLES test.t1 write;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
+UNLOCK TABLES;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+3 aaaaa3 bbbbb3
+4 aaaaa3 bbbbb3
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create large disk base table, do random queries, check cache hits
+*****
+set @vc1 = repeat('a', 200);
+SELECT @vc1 FROM DUAL;
+@vc1
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('b', 998);
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @b2 = 'b2';
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @d2 = 'dd2';
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+select length(@x0),length(@b1),length(@d1) from dual;
+length(@x0) length(@b1) length(@d1)
+256 2256 3000
+select length(@x0),length(@b2),length(@d2) from dual;
+length(@x0) length(@b2) length(@d2)
+256 20000 30000
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+INSERT INTO test.t1 values(1,@vc1,@d1);
+INSERT INTO test.t1 values(2,@vc2,@d2);
+explain SELECT * from test.t1 WHERE a1 = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
+SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3)
+FROM test.t1 WHERE a1=1 ORDER BY a1;
+a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3)
+1 200 3000 dd1
+SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3)
+FROM test.t1 where a1=2 ORDER BY a1;
+a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3)
+2 500 30000 dd2
+UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1;
+UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2;
+SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3)
+FROM test.t1 where a1=1;
+a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3)
+1 500 30000 dd2
+SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3)
+FROM test.t1 where a1=2;
+a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3)
+2 200 3000 dd1
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250))
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+SELECT COUNT(*) from test.t1;
+COUNT(*)
+100
+SELECT SUM(a1) from test.t1;
+SUM(a1)
+5050
+SELECT MIN(a1) from test.t1;
+MIN(a1)
+1
+SELECT MAX(a1) from test.t1;
+MAX(a1)
+100
+SELECT a5 from test.t1 where a1=50;
+a5
+root@localhost
+SELECT * from test.t1 order by a1;
+a1 a2 a3 a4 a5
+1 aaaaaaaaaaaaaaaa1 bbbbbbbbbbbbbbbbbb1 2006-06-20 root@localhost
+2 aaaaaaaaaaaaaaaa2 bbbbbbbbbbbbbbbbbb2 2006-06-20 root@localhost
+3 aaaaaaaaaaaaaaaa3 bbbbbbbbbbbbbbbbbb3 2006-06-20 root@localhost
+4 aaaaaaaaaaaaaaaa4 bbbbbbbbbbbbbbbbbb4 2006-06-20 root@localhost
+5 aaaaaaaaaaaaaaaa5 bbbbbbbbbbbbbbbbbb5 2006-06-20 root@localhost
+6 aaaaaaaaaaaaaaaa6 bbbbbbbbbbbbbbbbbb6 2006-06-20 root@localhost
+7 aaaaaaaaaaaaaaaa7 bbbbbbbbbbbbbbbbbb7 2006-06-20 root@localhost
+8 aaaaaaaaaaaaaaaa8 bbbbbbbbbbbbbbbbbb8 2006-06-20 root@localhost
+9 aaaaaaaaaaaaaaaa9 bbbbbbbbbbbbbbbbbb9 2006-06-20 root@localhost
+10 aaaaaaaaaaaaaaaa10 bbbbbbbbbbbbbbbbbb10 2006-06-20 root@localhost
+11 aaaaaaaaaaaaaaaa11 bbbbbbbbbbbbbbbbbb11 2006-06-20 root@localhost
+12 aaaaaaaaaaaaaaaa12 bbbbbbbbbbbbbbbbbb12 2006-06-20 root@localhost
+13 aaaaaaaaaaaaaaaa13 bbbbbbbbbbbbbbbbbb13 2006-06-20 root@localhost
+14 aaaaaaaaaaaaaaaa14 bbbbbbbbbbbbbbbbbb14 2006-06-20 root@localhost
+15 aaaaaaaaaaaaaaaa15 bbbbbbbbbbbbbbbbbb15 2006-06-20 root@localhost
+16 aaaaaaaaaaaaaaaa16 bbbbbbbbbbbbbbbbbb16 2006-06-20 root@localhost
+17 aaaaaaaaaaaaaaaa17 bbbbbbbbbbbbbbbbbb17 2006-06-20 root@localhost
+18 aaaaaaaaaaaaaaaa18 bbbbbbbbbbbbbbbbbb18 2006-06-20 root@localhost
+19 aaaaaaaaaaaaaaaa19 bbbbbbbbbbbbbbbbbb19 2006-06-20 root@localhost
+20 aaaaaaaaaaaaaaaa20 bbbbbbbbbbbbbbbbbb20 2006-06-20 root@localhost
+21 aaaaaaaaaaaaaaaa21 bbbbbbbbbbbbbbbbbb21 2006-06-20 root@localhost
+22 aaaaaaaaaaaaaaaa22 bbbbbbbbbbbbbbbbbb22 2006-06-20 root@localhost
+23 aaaaaaaaaaaaaaaa23 bbbbbbbbbbbbbbbbbb23 2006-06-20 root@localhost
+24 aaaaaaaaaaaaaaaa24 bbbbbbbbbbbbbbbbbb24 2006-06-20 root@localhost
+25 aaaaaaaaaaaaaaaa25 bbbbbbbbbbbbbbbbbb25 2006-06-20 root@localhost
+26 aaaaaaaaaaaaaaaa26 bbbbbbbbbbbbbbbbbb26 2006-06-20 root@localhost
+27 aaaaaaaaaaaaaaaa27 bbbbbbbbbbbbbbbbbb27 2006-06-20 root@localhost
+28 aaaaaaaaaaaaaaaa28 bbbbbbbbbbbbbbbbbb28 2006-06-20 root@localhost
+29 aaaaaaaaaaaaaaaa29 bbbbbbbbbbbbbbbbbb29 2006-06-20 root@localhost
+30 aaaaaaaaaaaaaaaa30 bbbbbbbbbbbbbbbbbb30 2006-06-20 root@localhost
+31 aaaaaaaaaaaaaaaa31 bbbbbbbbbbbbbbbbbb31 2006-06-20 root@localhost
+32 aaaaaaaaaaaaaaaa32 bbbbbbbbbbbbbbbbbb32 2006-06-20 root@localhost
+33 aaaaaaaaaaaaaaaa33 bbbbbbbbbbbbbbbbbb33 2006-06-20 root@localhost
+34 aaaaaaaaaaaaaaaa34 bbbbbbbbbbbbbbbbbb34 2006-06-20 root@localhost
+35 aaaaaaaaaaaaaaaa35 bbbbbbbbbbbbbbbbbb35 2006-06-20 root@localhost
+36 aaaaaaaaaaaaaaaa36 bbbbbbbbbbbbbbbbbb36 2006-06-20 root@localhost
+37 aaaaaaaaaaaaaaaa37 bbbbbbbbbbbbbbbbbb37 2006-06-20 root@localhost
+38 aaaaaaaaaaaaaaaa38 bbbbbbbbbbbbbbbbbb38 2006-06-20 root@localhost
+39 aaaaaaaaaaaaaaaa39 bbbbbbbbbbbbbbbbbb39 2006-06-20 root@localhost
+40 aaaaaaaaaaaaaaaa40 bbbbbbbbbbbbbbbbbb40 2006-06-20 root@localhost
+41 aaaaaaaaaaaaaaaa41 bbbbbbbbbbbbbbbbbb41 2006-06-20 root@localhost
+42 aaaaaaaaaaaaaaaa42 bbbbbbbbbbbbbbbbbb42 2006-06-20 root@localhost
+43 aaaaaaaaaaaaaaaa43 bbbbbbbbbbbbbbbbbb43 2006-06-20 root@localhost
+44 aaaaaaaaaaaaaaaa44 bbbbbbbbbbbbbbbbbb44 2006-06-20 root@localhost
+45 aaaaaaaaaaaaaaaa45 bbbbbbbbbbbbbbbbbb45 2006-06-20 root@localhost
+46 aaaaaaaaaaaaaaaa46 bbbbbbbbbbbbbbbbbb46 2006-06-20 root@localhost
+47 aaaaaaaaaaaaaaaa47 bbbbbbbbbbbbbbbbbb47 2006-06-20 root@localhost
+48 aaaaaaaaaaaaaaaa48 bbbbbbbbbbbbbbbbbb48 2006-06-20 root@localhost
+49 aaaaaaaaaaaaaaaa49 bbbbbbbbbbbbbbbbbb49 2006-06-20 root@localhost
+50 aaaaaaaaaaaaaaaa50 bbbbbbbbbbbbbbbbbb50 2006-06-20 root@localhost
+51 aaaaaaaaaaaaaaaa51 bbbbbbbbbbbbbbbbbb51 2006-06-20 root@localhost
+52 aaaaaaaaaaaaaaaa52 bbbbbbbbbbbbbbbbbb52 2006-06-20 root@localhost
+53 aaaaaaaaaaaaaaaa53 bbbbbbbbbbbbbbbbbb53 2006-06-20 root@localhost
+54 aaaaaaaaaaaaaaaa54 bbbbbbbbbbbbbbbbbb54 2006-06-20 root@localhost
+55 aaaaaaaaaaaaaaaa55 bbbbbbbbbbbbbbbbbb55 2006-06-20 root@localhost
+56 aaaaaaaaaaaaaaaa56 bbbbbbbbbbbbbbbbbb56 2006-06-20 root@localhost
+57 aaaaaaaaaaaaaaaa57 bbbbbbbbbbbbbbbbbb57 2006-06-20 root@localhost
+58 aaaaaaaaaaaaaaaa58 bbbbbbbbbbbbbbbbbb58 2006-06-20 root@localhost
+59 aaaaaaaaaaaaaaaa59 bbbbbbbbbbbbbbbbbb59 2006-06-20 root@localhost
+60 aaaaaaaaaaaaaaaa60 bbbbbbbbbbbbbbbbbb60 2006-06-20 root@localhost
+61 aaaaaaaaaaaaaaaa61 bbbbbbbbbbbbbbbbbb61 2006-06-20 root@localhost
+62 aaaaaaaaaaaaaaaa62 bbbbbbbbbbbbbbbbbb62 2006-06-20 root@localhost
+63 aaaaaaaaaaaaaaaa63 bbbbbbbbbbbbbbbbbb63 2006-06-20 root@localhost
+64 aaaaaaaaaaaaaaaa64 bbbbbbbbbbbbbbbbbb64 2006-06-20 root@localhost
+65 aaaaaaaaaaaaaaaa65 bbbbbbbbbbbbbbbbbb65 2006-06-20 root@localhost
+66 aaaaaaaaaaaaaaaa66 bbbbbbbbbbbbbbbbbb66 2006-06-20 root@localhost
+67 aaaaaaaaaaaaaaaa67 bbbbbbbbbbbbbbbbbb67 2006-06-20 root@localhost
+68 aaaaaaaaaaaaaaaa68 bbbbbbbbbbbbbbbbbb68 2006-06-20 root@localhost
+69 aaaaaaaaaaaaaaaa69 bbbbbbbbbbbbbbbbbb69 2006-06-20 root@localhost
+70 aaaaaaaaaaaaaaaa70 bbbbbbbbbbbbbbbbbb70 2006-06-20 root@localhost
+71 aaaaaaaaaaaaaaaa71 bbbbbbbbbbbbbbbbbb71 2006-06-20 root@localhost
+72 aaaaaaaaaaaaaaaa72 bbbbbbbbbbbbbbbbbb72 2006-06-20 root@localhost
+73 aaaaaaaaaaaaaaaa73 bbbbbbbbbbbbbbbbbb73 2006-06-20 root@localhost
+74 aaaaaaaaaaaaaaaa74 bbbbbbbbbbbbbbbbbb74 2006-06-20 root@localhost
+75 aaaaaaaaaaaaaaaa75 bbbbbbbbbbbbbbbbbb75 2006-06-20 root@localhost
+76 aaaaaaaaaaaaaaaa76 bbbbbbbbbbbbbbbbbb76 2006-06-20 root@localhost
+77 aaaaaaaaaaaaaaaa77 bbbbbbbbbbbbbbbbbb77 2006-06-20 root@localhost
+78 aaaaaaaaaaaaaaaa78 bbbbbbbbbbbbbbbbbb78 2006-06-20 root@localhost
+79 aaaaaaaaaaaaaaaa79 bbbbbbbbbbbbbbbbbb79 2006-06-20 root@localhost
+80 aaaaaaaaaaaaaaaa80 bbbbbbbbbbbbbbbbbb80 2006-06-20 root@localhost
+81 aaaaaaaaaaaaaaaa81 bbbbbbbbbbbbbbbbbb81 2006-06-20 root@localhost
+82 aaaaaaaaaaaaaaaa82 bbbbbbbbbbbbbbbbbb82 2006-06-20 root@localhost
+83 aaaaaaaaaaaaaaaa83 bbbbbbbbbbbbbbbbbb83 2006-06-20 root@localhost
+84 aaaaaaaaaaaaaaaa84 bbbbbbbbbbbbbbbbbb84 2006-06-20 root@localhost
+85 aaaaaaaaaaaaaaaa85 bbbbbbbbbbbbbbbbbb85 2006-06-20 root@localhost
+86 aaaaaaaaaaaaaaaa86 bbbbbbbbbbbbbbbbbb86 2006-06-20 root@localhost
+87 aaaaaaaaaaaaaaaa87 bbbbbbbbbbbbbbbbbb87 2006-06-20 root@localhost
+88 aaaaaaaaaaaaaaaa88 bbbbbbbbbbbbbbbbbb88 2006-06-20 root@localhost
+89 aaaaaaaaaaaaaaaa89 bbbbbbbbbbbbbbbbbb89 2006-06-20 root@localhost
+90 aaaaaaaaaaaaaaaa90 bbbbbbbbbbbbbbbbbb90 2006-06-20 root@localhost
+91 aaaaaaaaaaaaaaaa91 bbbbbbbbbbbbbbbbbb91 2006-06-20 root@localhost
+92 aaaaaaaaaaaaaaaa92 bbbbbbbbbbbbbbbbbb92 2006-06-20 root@localhost
+93 aaaaaaaaaaaaaaaa93 bbbbbbbbbbbbbbbbbb93 2006-06-20 root@localhost
+94 aaaaaaaaaaaaaaaa94 bbbbbbbbbbbbbbbbbb94 2006-06-20 root@localhost
+95 aaaaaaaaaaaaaaaa95 bbbbbbbbbbbbbbbbbb95 2006-06-20 root@localhost
+96 aaaaaaaaaaaaaaaa96 bbbbbbbbbbbbbbbbbb96 2006-06-20 root@localhost
+97 aaaaaaaaaaaaaaaa97 bbbbbbbbbbbbbbbbbb97 2006-06-20 root@localhost
+98 aaaaaaaaaaaaaaaa98 bbbbbbbbbbbbbbbbbb98 2006-06-20 root@localhost
+99 aaaaaaaaaaaaaaaa99 bbbbbbbbbbbbbbbbbb99 2006-06-20 root@localhost
+100 aaaaaaaaaaaaaaaa100 bbbbbbbbbbbbbbbbbb100 2006-06-20 root@localhost
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
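One detail that is easy to misread in the transaction section of the result above: with AUTOCOMMIT disabled (or inside START TRANSACTION), a ROLLBACK against the disk-backed NDB table discards the uncommitted insert exactly as it would for an in-memory NDB table. A condensed sketch of that pattern, reusing the same table shape as the test with illustrative values:

-- Commit versus rollback on an NDB disk-data table, as exercised above.
SET AUTOCOMMIT = 0;
INSERT INTO test.t1 VALUES (NULL, 'kept', 'kept');
COMMIT;
-- The committed row stays visible from here on.
INSERT INTO test.t1 VALUES (NULL, 'discarded', 'discarded');
ROLLBACK;
-- A SELECT now returns only the committed row; the rolled-back insert is gone.
SET AUTOCOMMIT = 1;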
diff --git a/mysql-test/r/ndb_dd_backuprestore.result b/mysql-test/r/ndb_dd_backuprestore.result
index cb6c62b16da..705881ee20a 100644
--- a/mysql-test/r/ndb_dd_backuprestore.result
+++ b/mysql-test/r/ndb_dd_backuprestore.result
@@ -223,31 +223,31 @@ t6 CREATE TABLE `t6` (
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
SELECT * FROM information_schema.partitions WHERE table_name= 't1';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't2';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't3';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't4';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't5';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't6';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT COUNT(*) FROM test.t1;
COUNT(*)
250
@@ -389,31 +389,31 @@ t6 CREATE TABLE `t6` (
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
SELECT * FROM information_schema.partitions WHERE table_name= 't1';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't2';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't3';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't4';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't5';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT * FROM information_schema.partitions WHERE table_name= 't6';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
-NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
+NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default
SELECT COUNT(*) FROM test.t1;
COUNT(*)
250
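The only substantive change in the hunks above is the NODEGROUP column of information_schema.partitions, which now reports the string 'default' rather than 0 when no nodegroup was assigned explicitly. A narrower projection than the SELECT * used by the test makes that column easier to spot; the table name here is illustrative:

-- Show only the partition-to-nodegroup mapping instead of the full partitions row.
SELECT PARTITION_NAME, PARTITION_METHOD, NODEGROUP, TABLESPACE_NAME
FROM information_schema.partitions
WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 't1';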
diff --git a/mysql-test/r/ndb_default_cluster.require b/mysql-test/r/ndb_default_cluster.require
index aa4988cdca3..3616ae0f343 100644
--- a/mysql-test/r/ndb_default_cluster.require
+++ b/mysql-test/r/ndb_default_cluster.require
@@ -1,2 +1,2 @@
Variable_name Value
-Ndb_connected_host localhost
+Ndb_config_from_host localhost
diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result
index fd793c4c2c7..e478c23ec00 100644
--- a/mysql-test/r/ndb_partition_key.result
+++ b/mysql-test/r/ndb_partition_key.result
@@ -194,6 +194,12 @@ c2 TEXT NOT NULL,
c3 INT NOT NULL,
PRIMARY KEY(c1,c3))
ENGINE=NDB
-PARTITION BY KEY(c3);
+PARTITION BY KEY(c3)
+(PARTITION p0 NODEGROUP 0, PARTITION p1 NODEGROUP 0);
ALTER TABLE t1 ADD COLUMN c4 INT AFTER c1;
+SELECT NODEGROUP,PARTITION_NAME FROM information_schema.partitions WHERE
+table_name = "t1";
+NODEGROUP PARTITION_NAME
+0 p0
+0 p1
DROP TABLE t1;
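The updated ndb_partition_key test now pins both partitions to nodegroup 0 at CREATE time and verifies the placement through information_schema. The same pattern in isolation looks like the sketch below; the table name tk is a hypothetical stand-in and the TEXT column from the original definition is omitted for brevity.

-- KEY partitioning with explicit nodegroup placement, then verification.
CREATE TABLE test.tk (
  c1 INT NOT NULL,
  c3 INT NOT NULL,
  PRIMARY KEY (c1, c3)
) ENGINE=NDB
PARTITION BY KEY (c3)
(PARTITION p0 NODEGROUP 0, PARTITION p1 NODEGROUP 0);
SELECT NODEGROUP, PARTITION_NAME
FROM information_schema.partitions
WHERE table_name = 'tk';
DROP TABLE test.tk;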
diff --git a/mysql-test/r/ndb_partition_range.result b/mysql-test/r/ndb_partition_range.result
index 9cc9aa2cda9..f4bae479239 100644
--- a/mysql-test/r/ndb_partition_range.result
+++ b/mysql-test/r/ndb_partition_range.result
@@ -17,9 +17,9 @@ INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
select * from information_schema.partitions where table_name= 't1';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
-NULL test t1 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 0 0 # # NULL NULL default 0 default
-NULL test t1 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 0 0 # # NULL NULL default 0 default
-NULL test t1 x3 NULL 3 NULL RANGE NULL a NULL 20 0 0 0 # 0 0 # # NULL NULL default 0 default
+NULL test t1 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 0 0 # # NULL NULL default default default
+NULL test t1 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 0 0 # # NULL NULL default default default
+NULL test t1 x3 NULL 3 NULL RANGE NULL a NULL 20 0 0 0 # 0 0 # # NULL NULL default default default
select * from t1 order by a;
a b c
1 1 1
diff --git a/mysql-test/r/ndb_replace.result b/mysql-test/r/ndb_replace.result
index 8e85feb3bd3..23844ce3bff 100644
--- a/mysql-test/r/ndb_replace.result
+++ b/mysql-test/r/ndb_replace.result
@@ -31,6 +31,7 @@ SELECT * from t1 ORDER BY i;
i j k
3 1 42
17 2 NULL
+DROP TABLE t1;
CREATE TABLE t2 (a INT(11) NOT NULL,
b INT(11) NOT NULL,
c INT(11) NOT NULL,
@@ -52,3 +53,47 @@ SELECT * FROM t2 ORDER BY id;
a b c x y z id i
1 1 1 b b b 5 2
DROP TABLE t2;
+drop table if exists t1;
+create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster;
+insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
+replace into t1 (pk, apk) values (4, 1), (5, 2);
+select * from t1 order by pk;
+pk apk data
+3 3 3
+4 1 NULL
+5 2 NULL
+delete from t1;
+insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
+replace into t1 (pk, apk) values (1, 4), (2, 5);
+select * from t1 order by pk;
+pk apk data
+1 4 NULL
+2 5 NULL
+3 3 3
+delete from t1;
+insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6);
+load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
+select * from t1 order by pk;
+pk apk data
+1 1 1
+3 4 NULL
+5 6 NULL
+delete from t1;
+insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5);
+load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
+select * from t1 order by pk;
+pk apk data
+1 1 1
+3 4 NULL
+5 6 NULL
+delete from t1;
+insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
+replace into t1 (pk, apk) select 4, 1;
+replace into t1 (pk, apk) select 2, 4;
+select * from t1 order by pk;
+pk apk data
+2 4 NULL
+3 3 3
+4 1 NULL
+drop table t1;
+End of 5.0 tests.
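The new REPLACE coverage deserves a short gloss: when the incoming row collides on the secondary unique key apk rather than the primary key, REPLACE deletes the old row and inserts the new one, so any column not listed in the statement (data here) ends up NULL instead of keeping its previous value. Condensed from the first block of new statements above:

-- REPLACE resolving a duplicate on a secondary unique key, not the primary key.
CREATE TABLE t1 (pk INT PRIMARY KEY, apk INT UNIQUE, data INT) ENGINE=ndbcluster;
INSERT INTO t1 VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);
REPLACE INTO t1 (pk, apk) VALUES (4, 1), (5, 2);
-- (1,1,1) and (2,2,2) are replaced by (4,1,NULL) and (5,2,NULL); (3,3,3) is untouched.
SELECT * FROM t1 ORDER BY pk;
DROP TABLE t1;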
diff --git a/mysql-test/r/ndb_restore.result b/mysql-test/r/ndb_restore.result
index 7dc4057e615..b946d97bea1 100644
--- a/mysql-test/r/ndb_restore.result
+++ b/mysql-test/r/ndb_restore.result
@@ -1,6 +1,6 @@
use test;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
+drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
CREATE TABLE `t1_c` (
`capgoaledatta` smallint(5) unsigned NOT NULL auto_increment,
`goaledatta` char(2) NOT NULL default '',
@@ -116,6 +116,8 @@ CREATE TABLE `t9_c` (
PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3);
+CREATE TABLE t10_c (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO t10_c VALUES (1),(2),(3);
create table t1 engine=myisam as select * from t1_c;
create table t2 engine=myisam as select * from t2_c;
create table t3 engine=myisam as select * from t3_c;
@@ -125,6 +127,7 @@ create table t6 engine=myisam as select * from t6_c;
create table t7 engine=myisam as select * from t7_c;
create table t8 engine=myisam as select * from t8_c;
create table t9 engine=myisam as select * from t9_c;
+create table t10 engine=myisam as select * from t10_c;
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
@@ -132,7 +135,7 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE test.backup_info;
-drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
select count(*) from t1;
count(*)
5
@@ -232,20 +235,11 @@ from (select * from t9 union
select * from t9_c) a;
count(*)
3
-ALTER TABLE t1_c
-PARTITION BY RANGE (`capgoaledatta`)
-(PARTITION p0 VALUES LESS THAN MAXVALUE);
-ALTER TABLE t2_c
-PARTITION BY LIST(`capgotod`)
-(PARTITION p0 VALUES IN (0,1,2,3,4,5,6));
-ALTER TABLE t3_c
-PARTITION BY HASH (`CapGoaledatta`);
-ALTER TABLE t5_c
-PARTITION BY HASH (`capfa`)
-PARTITIONS 4;
-ALTER TABLE t6_c
-PARTITION BY LINEAR HASH (`relatta`)
-PARTITIONS 4;
+select * from t10_c order by a;
+a
+1
+2
+3
ALTER TABLE t7_c
PARTITION BY LINEAR KEY (`dardtestard`);
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
@@ -255,7 +249,7 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE test.backup_info;
-drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
select count(*) from t1;
count(*)
5
@@ -355,7 +349,7 @@ from (select * from t9 union
select * from t9_c) a;
count(*)
3
-drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
select count(*) from t1;
count(*)
5
@@ -455,7 +449,7 @@ from (select * from t9 union
select * from t9_c) a;
count(*)
3
-drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM test.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
@@ -463,7 +457,6 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
DROP TABLE test.backup_info;
-Create table test/def/t2_c failed: Translate frm error
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
drop table if exists t2_c;
520093696,<the_backup_id>
diff --git a/mysql-test/r/ndb_trigger.result b/mysql-test/r/ndb_trigger.result
new file mode 100644
index 00000000000..27f83df70c9
--- /dev/null
+++ b/mysql-test/r/ndb_trigger.result
@@ -0,0 +1,119 @@
+drop table if exists t1, t2, t3;
+create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb;
+create table t2 (op char(1), a int not null, b decimal (63,30));
+create table t3 select 1 as i;
+create trigger t1_bu before update on t1 for each row
+begin
+insert into t2 values ("u", old.a, old.b);
+set new.b = old.b + 10;
+end;//
+create trigger t1_bd before delete on t1 for each row
+begin
+insert into t2 values ("d", old.a, old.b);
+end;//
+insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05);
+update t1 set a=5 where a != 3;
+select * from t1 order by id;
+id a b
+1 5 11.050000000000000000000000000000
+2 5 12.050000000000000000000000000000
+3 3 3.050000000000000000000000000000
+4 5 14.050000000000000000000000000000
+select * from t2 order by op, a, b;
+op a b
+u 1 1.050000000000000000000000000000
+u 2 2.050000000000000000000000000000
+u 4 4.050000000000000000000000000000
+delete from t2;
+update t1, t3 set a=6 where a = 5;
+select * from t1 order by id;
+id a b
+1 6 21.050000000000000000000000000000
+2 6 22.050000000000000000000000000000
+3 3 3.050000000000000000000000000000
+4 6 24.050000000000000000000000000000
+select * from t2 order by op, a, b;
+op a b
+u 5 11.050000000000000000000000000000
+u 5 12.050000000000000000000000000000
+u 5 14.050000000000000000000000000000
+delete from t2;
+delete from t1 where a != 3;
+select * from t1 order by id;
+id a b
+3 3 3.050000000000000000000000000000
+select * from t2 order by op, a, b;
+op a b
+d 6 21.050000000000000000000000000000
+d 6 22.050000000000000000000000000000
+d 6 24.050000000000000000000000000000
+delete from t2;
+insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05);
+delete t1 from t1, t3 where a != 3;
+select * from t1 order by id;
+id a b
+3 3 3.050000000000000000000000000000
+select * from t2 order by op, a, b;
+op a b
+d 1 1.050000000000000000000000000000
+d 2 2.050000000000000000000000000000
+d 4 4.050000000000000000000000000000
+delete from t2;
+insert into t1 values (4, 4, 4.05);
+insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1;
+select * from t1 order by id;
+id a b
+3 4 13.050000000000000000000000000000
+4 5 14.050000000000000000000000000000
+select * from t2 order by op, a, b;
+op a b
+u 3 3.050000000000000000000000000000
+u 4 4.050000000000000000000000000000
+delete from t2;
+delete from t3;
+insert into t3 values (4), (3);
+insert into t1 (id, a) (select i, 1 from t3) on duplicate key update a= a + 1;
+select * from t1 order by id;
+id a b
+3 5 23.050000000000000000000000000000
+4 6 24.050000000000000000000000000000
+select * from t2 order by op, a, b;
+op a b
+u 4 13.050000000000000000000000000000
+u 5 14.050000000000000000000000000000
+delete from t2;
+replace into t1 (id, a) values (4, 1), (3, 1);
+select * from t1 order by id;
+id a b
+3 1 0.000000000000000000000000000000
+4 1 0.000000000000000000000000000000
+select * from t2 order by op, a, b;
+op a b
+d 5 23.050000000000000000000000000000
+d 6 24.050000000000000000000000000000
+delete from t1;
+delete from t2;
+insert into t1 values (3, 1, 1.05), (4, 1, 2.05);
+replace into t1 (id, a) (select i, 2 from t3);
+select * from t1 order by id;
+id a b
+3 2 0.000000000000000000000000000000
+4 2 0.000000000000000000000000000000
+select * from t2 order by op, a, b;
+op a b
+d 1 1.050000000000000000000000000000
+d 1 2.050000000000000000000000000000
+delete from t1;
+delete from t2;
+insert into t1 values (3, 1, 1.05), (5, 2, 2.05);
+load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a);
+select * from t1 order by id;
+id a b
+3 4 0.000000000000000000000000000000
+5 6 0.000000000000000000000000000000
+select * from t2 order by op, a, b;
+op a b
+d 1 1.050000000000000000000000000000
+d 2 2.050000000000000000000000000000
+drop tables t1, t2, t3;
+End of 5.0 tests
diff --git a/mysql-test/r/odbc.result b/mysql-test/r/odbc.result
index 2d9d39393b1..5629d3dab33 100644
--- a/mysql-test/r/odbc.result
+++ b/mysql-test/r/odbc.result
@@ -14,3 +14,14 @@ explain select * from t1 where b is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
drop table t1;
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY);
+INSERT INTO t1 VALUES (NULL);
+SELECT sql_no_cache a, last_insert_id() FROM t1 WHERE a IS NULL;
+a last_insert_id()
+1 1
+SELECT sql_no_cache a, last_insert_id() FROM t1 WHERE a IS NULL;
+a last_insert_id()
+SELECT sql_no_cache a, last_insert_id() FROM t1;
+a last_insert_id()
+1 1
+DROP TABLE t1;
diff --git a/mysql-test/r/olap.result b/mysql-test/r/olap.result
index c07f025c8ef..993426a02fc 100644
--- a/mysql-test/r/olap.result
+++ b/mysql-test/r/olap.result
@@ -620,8 +620,8 @@ CREATE VIEW v1 AS
SELECT a, LENGTH(a), COUNT(*) FROM t1 GROUP BY a WITH ROLLUP;
DESC v1;
Field Type Null Key Default Extra
-a bigint(11) YES NULL
-LENGTH(a) bigint(10) YES NULL
+a int(11) YES 0
+LENGTH(a) int(10) YES NULL
COUNT(*) bigint(21) NO 0
SELECT * FROM v1;
a LENGTH(a) COUNT(*)
diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result
index 47ee2be6f51..e95489864f7 100644
--- a/mysql-test/r/partition.result
+++ b/mysql-test/r/partition.result
@@ -708,11 +708,6 @@ partition by list (a)
alter table t1 rebuild partition;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1
drop table t1;
-create table t1 (a int) engine=innodb partition by hash(a) ;
-show table status like 't1';
-Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 InnoDB 10 Compact 2 8192 16384 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned
-drop table t1;
create table t2 (s1 int not null auto_increment, primary key (s1)) partition by list (s1) (partition p1 values in (1),partition p2 values in (2),partition p3 values in (3),partition p4 values in (4));
insert into t2 values (null),(null),(null);
select * from t2;
@@ -787,6 +782,7 @@ CALL test.p1(13);
Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
drop table t1;
+drop procedure test.p1;
CREATE TABLE t1 (a int not null)
partition by key(a)
(partition p0 COMMENT='first partition');
@@ -1057,4 +1053,68 @@ alter table t1 add partition (partition p2 values in (3));
alter table t1 drop partition p2;
use test;
drop database db99;
+drop procedure if exists mysqltest_1;
+create table t1 (a int)
+partition by list (a)
+(partition p0 values in (0));
+insert into t1 values (0);
+create procedure mysqltest_1 ()
+begin
+begin
+declare continue handler for sqlexception begin end;
+update ignore t1 set a = 1 where a = 0;
+end;
+prepare stmt1 from 'alter table t1';
+execute stmt1;
+end//
+call mysqltest_1()//
+drop table t1;
+drop procedure mysqltest_1;
+create table t1 (a int, index(a))
+partition by hash(a);
+insert into t1 values (1),(2);
+select * from t1 ORDER BY a DESC;
+a
+2
+1
+drop table t1;
+create table t1 (a int) engine myisam
+partition by range (a)
+subpartition by hash (a)
+(partition p0 VALUES LESS THAN (1) DATA DIRECTORY = 'hello/master-data/tmpdata' INDEX DIRECTORY = 'hello/master-data/tmpinx'
+(SUBPARTITION subpart00, SUBPARTITION subpart01));
+hello/master-data/test/t1#P#p0#SP#subpart00.MYD
+hello/master-data/test/t1#P#p0#SP#subpart00.MYI
+hello/master-data/test/t1#P#p0#SP#subpart01.MYD
+hello/master-data/test/t1#P#p0#SP#subpart01.MYI
+hello/master-data/test/t1.frm
+hello/master-data/test/t1.par
+hello/master-data/tmpdata/t1#P#p0#SP#subpart00.MYD
+hello/master-data/tmpdata/t1#P#p0#SP#subpart01.MYD
+hello/master-data/tmpinx/t1#P#p0#SP#subpart00.MYI
+hello/master-data/tmpinx/t1#P#p0#SP#subpart01.MYI
+ALTER TABLE t1 REORGANIZE PARTITION p0 INTO
+(partition p1 VALUES LESS THAN (1) DATA DIRECTORY = 'hello/master-data/tmpdata' INDEX DIRECTORY = 'hello/master-data/tmpinx'
+(SUBPARTITION subpart10, SUBPARTITION subpart11),
+partition p2 VALUES LESS THAN (2) DATA DIRECTORY = 'hello/master-data/tmpdata' INDEX DIRECTORY = 'hello/master-data/tmpinx'
+(SUBPARTITION subpart20, SUBPARTITION subpart21));
+hello/master-data/test/t1#P#p1#SP#subpart10.MYD
+hello/master-data/test/t1#P#p1#SP#subpart10.MYI
+hello/master-data/test/t1#P#p1#SP#subpart11.MYD
+hello/master-data/test/t1#P#p1#SP#subpart11.MYI
+hello/master-data/test/t1#P#p2#SP#subpart20.MYD
+hello/master-data/test/t1#P#p2#SP#subpart20.MYI
+hello/master-data/test/t1#P#p2#SP#subpart21.MYD
+hello/master-data/test/t1#P#p2#SP#subpart21.MYI
+hello/master-data/test/t1.frm
+hello/master-data/test/t1.par
+hello/master-data/tmpdata/t1#P#p1#SP#subpart10.MYD
+hello/master-data/tmpdata/t1#P#p1#SP#subpart11.MYD
+hello/master-data/tmpdata/t1#P#p2#SP#subpart20.MYD
+hello/master-data/tmpdata/t1#P#p2#SP#subpart21.MYD
+hello/master-data/tmpinx/t1#P#p1#SP#subpart10.MYI
+hello/master-data/tmpinx/t1#P#p1#SP#subpart11.MYI
+hello/master-data/tmpinx/t1#P#p2#SP#subpart20.MYI
+hello/master-data/tmpinx/t1#P#p2#SP#subpart21.MYI
+drop table t1;
End of 5.1 tests
diff --git a/mysql-test/r/partition_hash.result b/mysql-test/r/partition_hash.result
index 08faccd024e..9a82a36d902 100644
--- a/mysql-test/r/partition_hash.result
+++ b/mysql-test/r/partition_hash.result
@@ -1,4 +1,88 @@
drop table if exists t1;
+create table t1 (a int unsigned)
+partition by hash(a div 2)
+partitions 4;
+insert into t1 values (null),(0),(1),(2),(3),(4),(5),(6),(7);
+select * from t1 where a < 0;
+a
+select * from t1 where a is null or (a >= 5 and a <= 7);
+a
+NULL
+5
+6
+7
+select * from t1 where a is null;
+a
+NULL
+select * from t1 where a is not null;
+a
+0
+1
+2
+3
+4
+5
+6
+7
+select * from t1 where a >= 1 and a < 3;
+a
+1
+2
+select * from t1 where a >= 3 and a <= 5;
+a
+3
+4
+5
+select * from t1 where a > 2 and a < 4;
+a
+3
+select * from t1 where a > 3 and a <= 6;
+a
+4
+5
+6
+select * from t1 where a > 5;
+a
+6
+7
+select * from t1 where a >= 1 and a <= 5;
+a
+1
+2
+3
+4
+5
+explain partitions select * from t1 where a < 0;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0,p1,p2,p3 ALL NULL NULL NULL NULL 9 Using where
+explain partitions select * from t1 where a is null or (a >= 5 and a <= 7);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0,p2,p3 ALL NULL NULL NULL NULL 7 Using where
+explain partitions select * from t1 where a is null;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0 ALL NULL NULL NULL NULL 3 Using where
+explain partitions select * from t1 where a is not null;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0,p1,p2,p3 ALL NULL NULL NULL NULL 9 Using where
+explain partitions select * from t1 where a >= 1 and a < 3;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0,p1 ALL NULL NULL NULL NULL 5 Using where
+explain partitions select * from t1 where a >= 3 and a <= 5;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1,p2 ALL NULL NULL NULL NULL 4 Using where
+explain partitions select * from t1 where a > 2 and a < 4;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1 ALL NULL NULL NULL NULL 2 Using where
+explain partitions select * from t1 where a > 3 and a <= 6;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2,p3 ALL NULL NULL NULL NULL 4 Using where
+explain partitions select * from t1 where a > 5;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0,p1,p2,p3 ALL NULL NULL NULL NULL 9 Using where
+explain partitions select * from t1 where a >= 1 and a <= 5;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0,p1,p2,p3 ALL NULL NULL NULL NULL 9 Using where
+drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
diff --git a/mysql-test/r/partition_innodb.result b/mysql-test/r/partition_innodb.result
index 5e5931fdbf8..f4e75ccdb05 100644
--- a/mysql-test/r/partition_innodb.result
+++ b/mysql-test/r/partition_innodb.result
@@ -1,109 +1,5 @@
-SET @max_row = 20;
-DROP TABLE IF EXISTS t0_template;
-CREATE TABLE t0_template (
-f_int1 INTEGER,
-f_int2 INTEGER,
-f_char1 CHAR(20),
-f_char2 CHAR(20),
-f_charbig VARCHAR(1000) ,
-PRIMARY KEY(f_int1))
-ENGINE = MEMORY;
-INSERT INTO t0_template
-SET f_int1 = 20, f_int2 = 20, f_char1 = '20', f_char2 = '20',
-f_charbig = '===20===';
-INSERT INTO t0_template
-SET f_int1 = 19, f_int2 = 19, f_char1 = '19', f_char2 = '19',
-f_charbig = '===19===';
-INSERT INTO t0_template
-SET f_int1 = 18, f_int2 = 18, f_char1 = '18', f_char2 = '18',
-f_charbig = '===18===';
-INSERT INTO t0_template
-SET f_int1 = 17, f_int2 = 17, f_char1 = '17', f_char2 = '17',
-f_charbig = '===17===';
-INSERT INTO t0_template
-SET f_int1 = 16, f_int2 = 16, f_char1 = '16', f_char2 = '16',
-f_charbig = '===16===';
-INSERT INTO t0_template
-SET f_int1 = 15, f_int2 = 15, f_char1 = '15', f_char2 = '15',
-f_charbig = '===15===';
-INSERT INTO t0_template
-SET f_int1 = 14, f_int2 = 14, f_char1 = '14', f_char2 = '14',
-f_charbig = '===14===';
-INSERT INTO t0_template
-SET f_int1 = 13, f_int2 = 13, f_char1 = '13', f_char2 = '13',
-f_charbig = '===13===';
-INSERT INTO t0_template
-SET f_int1 = 12, f_int2 = 12, f_char1 = '12', f_char2 = '12',
-f_charbig = '===12===';
-INSERT INTO t0_template
-SET f_int1 = 11, f_int2 = 11, f_char1 = '11', f_char2 = '11',
-f_charbig = '===11===';
-INSERT INTO t0_template
-SET f_int1 = 10, f_int2 = 10, f_char1 = '10', f_char2 = '10',
-f_charbig = '===10===';
-INSERT INTO t0_template
-SET f_int1 = 9, f_int2 = 9, f_char1 = '9', f_char2 = '9',
-f_charbig = '===9===';
-INSERT INTO t0_template
-SET f_int1 = 8, f_int2 = 8, f_char1 = '8', f_char2 = '8',
-f_charbig = '===8===';
-INSERT INTO t0_template
-SET f_int1 = 7, f_int2 = 7, f_char1 = '7', f_char2 = '7',
-f_charbig = '===7===';
-INSERT INTO t0_template
-SET f_int1 = 6, f_int2 = 6, f_char1 = '6', f_char2 = '6',
-f_charbig = '===6===';
-INSERT INTO t0_template
-SET f_int1 = 5, f_int2 = 5, f_char1 = '5', f_char2 = '5',
-f_charbig = '===5===';
-INSERT INTO t0_template
-SET f_int1 = 4, f_int2 = 4, f_char1 = '4', f_char2 = '4',
-f_charbig = '===4===';
-INSERT INTO t0_template
-SET f_int1 = 3, f_int2 = 3, f_char1 = '3', f_char2 = '3',
-f_charbig = '===3===';
-INSERT INTO t0_template
-SET f_int1 = 2, f_int2 = 2, f_char1 = '2', f_char2 = '2',
-f_charbig = '===2===';
-INSERT INTO t0_template
-SET f_int1 = 1, f_int2 = 1, f_char1 = '1', f_char2 = '1',
-f_charbig = '===1===';
-DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 (f_date DATE, f_varchar VARCHAR(30)) engine='InnoDB';
-INSERT INTO t1 (f_date, f_varchar)
-SELECT CONCAT(CAST((f_int1 + 999) AS CHAR),'-02-10'), CAST(f_char1 AS CHAR)
-FROM t0_template
-WHERE f_int1 + 999 BETWEEN 1000 AND 9999;
-SELECT IF(9999 - 1000 + 1 > @max_row, @max_row , 9999 - 1000 + 1)
-INTO @exp_row_count;
-ALTER TABLE t1 PARTITION BY HASH(CAST(YEAR(f_date) AS SIGNED INTEGER));
-# 1.1.5 Add two named partitions + test
-ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part7);
-drop table t1;
-CREATE TABLE t1 (f_date DATE, f_varchar VARCHAR(30))
-ENGINE=InnoDB
-PARTITION BY HASH(CAST(YEAR(f_date) AS SIGNED INTEGER));
-# This statement crashes the server.
-# CREATE partitioned table with three partitions in one step
-# would be harmless.
-ALTER TABLE t1 ADD PARTITION PARTITIONS 1;
-DROP VIEW IF EXISTS v1;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t0_aux;
-DROP TABLE IF EXISTS t0_definition;
-DROP TABLE IF EXISTS t0_template;
-create table t1 (id varchar(64) primary key) engine=innodb
-partition by key(id) partitions 5;
-insert into t1 values ('a');
-insert into t1 values ('aa');
-insert into t1 values ('aaa');
-select * from t1 where id = 'a';
-id
-a
-select * from t1 where id = 'aa';
-id
-aa
-select * from t1 where id = 'aaa';
-id
-aaa
+create table t1 (a int) engine=innodb partition by hash(a) ;
+show table status like 't1';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t1 InnoDB 10 Compact 2 8192 16384 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned
drop table t1;
diff --git a/mysql-test/r/partition_list.result b/mysql-test/r/partition_list.result
index c722a3c6be3..e64a7a8d154 100644
--- a/mysql-test/r/partition_list.result
+++ b/mysql-test/r/partition_list.result
@@ -1,4 +1,98 @@
drop table if exists t1;
+create table t1 (a int unsigned)
+partition by list (a)
+(partition p0 values in (0),
+partition p1 values in (1),
+partition pnull values in (null),
+partition p2 values in (2));
+insert into t1 values (null),(0),(1),(2);
+select * from t1 where a < 2;
+a
+0
+1
+select * from t1 where a <= 0;
+a
+0
+select * from t1 where a < 1;
+a
+0
+select * from t1 where a > 0;
+a
+1
+2
+select * from t1 where a > 1;
+a
+2
+select * from t1 where a >= 0;
+a
+0
+1
+2
+select * from t1 where a >= 1;
+a
+1
+2
+select * from t1 where a is null;
+a
+NULL
+select * from t1 where a is not null;
+a
+0
+1
+2
+select * from t1 where a is null or a > 0;
+a
+1
+NULL
+2
+drop table t1;
+create table t1 (a int unsigned, b int)
+partition by list (a)
+subpartition by hash (b)
+subpartitions 2
+(partition p0 values in (0),
+partition p1 values in (1),
+partition pnull values in (null, 2),
+partition p3 values in (3));
+insert into t1 values (0,0),(0,1),(1,0),(1,1),(null,0),(null,1);
+insert into t1 values (2,0),(2,1),(3,0),(3,1);
+explain partitions select * from t1 where a is null;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pnull_pnullsp0,pnull_pnullsp1 ALL NULL NULL NULL NULL 4 Using where
+select * from t1 where a is null;
+a b
+NULL 0
+NULL 1
+explain partitions select * from t1 where a = 2;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pnull_pnullsp0,pnull_pnullsp1 ALL NULL NULL NULL NULL 4 Using where
+select * from t1 where a = 2;
+a b
+2 0
+2 1
+select * from t1 where a <= 0;
+a b
+0 0
+0 1
+select * from t1 where a < 3;
+a b
+0 0
+0 1
+1 0
+1 1
+2 0
+2 1
+select * from t1 where a >= 1 or a is null;
+a b
+1 0
+1 1
+NULL 0
+2 0
+NULL 1
+2 1
+3 0
+3 1
+drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
diff --git a/mysql-test/r/partition_mgm.result b/mysql-test/r/partition_mgm.result
index 0f2c8c57872..f64ffaff495 100644
--- a/mysql-test/r/partition_mgm.result
+++ b/mysql-test/r/partition_mgm.result
@@ -24,3 +24,15 @@ hello/master-data/test/t1#P#p0.MYD
hello/master-data/test/t1#P#p0.MYI
hello/master-data/test/t1.frm
hello/master-data/test/t1.par
+drop table t1;
+create table t1 (a int)
+partition by list (a)
+subpartition by hash (a)
+(partition p11 values in (1,2),
+partition p12 values in (3,4));
+alter table t1 REORGANIZE partition p11, p12 INTO
+(partition p1 values in (1,2,3,4));
+alter table t1 REORGANIZE partition p1 INTO
+(partition p11 values in (1,2),
+partition p12 values in (3,4));
+drop table t1;
diff --git a/mysql-test/r/partition_order.result b/mysql-test/r/partition_order.result
index 7a1ab1d6dc8..78ff7cd3121 100644
--- a/mysql-test/r/partition_order.result
+++ b/mysql-test/r/partition_order.result
@@ -718,7 +718,11 @@ partitions 2
partition x2 values less than (100));
INSERT into t1 values (1, 1);
INSERT into t1 values (5, NULL);
-INSERT into t1 values (2, 5);
+INSERT into t1 values (2, 4);
+INSERT into t1 values (3, 3);
+INSERT into t1 values (4, 5);
+INSERT into t1 values (7, 1);
+INSERT into t1 values (6, 6);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
INSERT into t1 values (40, NULL);
@@ -727,7 +731,55 @@ a b
5 NULL
40 NULL
1 1
+7 1
35 2
+3 3
+2 4
30 4
-2 5
+4 5
+6 6
+select * from t1 force index (b) where b < 10 ORDER BY b;
+a b
+1 1
+7 1
+35 2
+3 3
+2 4
+30 4
+4 5
+6 6
+select * from t1 force index (b) where b < 10 ORDER BY b DESC;
+a b
+6 6
+4 5
+2 4
+30 4
+3 3
+35 2
+7 1
+1 1
+drop table t1;
+create table t1 (a int not null, b int, c varchar(20), key (a,b,c))
+partition by range (b)
+(partition p0 values less than (5),
+partition p1 values less than (10));
+INSERT into t1 values (1,1,'1'),(2,2,'2'),(1,3,'3'),(2,4,'4'),(1,5,'5');
+INSERT into t1 values (2,6,'6'),(1,7,'7'),(2,8,'8'),(1,9,'9');
+INSERT into t1 values (1, NULL, NULL), (2, NULL, '10');
+select * from t1 where a = 1 order by a desc, b desc;
+a b c
+1 9 9
+1 7 7
+1 5 5
+1 3 3
+1 1 1
+1 NULL NULL
+select * from t1 where a = 1 order by b desc;
+a b c
+1 9 9
+1 7 7
+1 5 5
+1 3 3
+1 1 1
+1 NULL NULL
drop table t1;
diff --git a/mysql-test/r/partition_pruning.result b/mysql-test/r/partition_pruning.result
index 58e7427d0f7..c7f1861f6b1 100644
--- a/mysql-test/r/partition_pruning.result
+++ b/mysql-test/r/partition_pruning.result
@@ -149,6 +149,48 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
explain partitions select * from t6 where a > 3 and a < 5;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+drop table t6;
+create table t6 (a int unsigned not null) partition by LIST(a) (
+partition p1 values in (1),
+partition p3 values in (3),
+partition p5 values in (5),
+partition p7 values in (7),
+partition p9 values in (9)
+);
+insert into t6 values (1),(3),(5);
+explain partitions select * from t6 where a < 1;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t6 where a <= 1;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t6 p1 system NULL NULL NULL NULL 1
+explain partitions select * from t6 where a > 9;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t6 where a >= 9;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t6 where a > 0 and a < 5;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t6 p1,p3 ALL NULL NULL NULL NULL 2 Using where
+explain partitions select * from t6 where a > 5 and a < 12;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t6 where a > 3 and a < 8 ;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t6 p5,p7 system NULL NULL NULL NULL 1
+explain partitions select * from t6 where a >= 0 and a <= 5;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t6 p1,p3,p5 ALL NULL NULL NULL NULL 3 Using where
+explain partitions select * from t6 where a >= 5 and a <= 12;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t6 p5,p7,p9 system NULL NULL NULL NULL 1
+explain partitions select * from t6 where a >= 3 and a <= 8;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t6 p3,p5,p7 ALL NULL NULL NULL NULL 2 Using where
+explain partitions select * from t6 where a > 3 and a < 5;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
create table t7 (a int not null) partition by RANGE(a) (
partition p10 values less than (10),
partition p30 values less than (30),
@@ -184,6 +226,42 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
explain partitions select * from t7 where a > 11 and a < 29;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+drop table t7;
+create table t7 (a int unsigned not null) partition by RANGE(a) (
+partition p10 values less than (10),
+partition p30 values less than (30),
+partition p50 values less than (50),
+partition p70 values less than (70),
+partition p90 values less than (90)
+);
+insert into t7 values (10),(30),(50);
+explain partitions select * from t7 where a < 5;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t7 where a < 10;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t7 where a <= 10;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t7 p10,p30 system NULL NULL NULL NULL 1
+explain partitions select * from t7 where a = 10;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t7 p30 system NULL NULL NULL NULL 1
+explain partitions select * from t7 where a < 90;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t7 p10,p30,p50,p70,p90 ALL NULL NULL NULL NULL 3 Using where
+explain partitions select * from t7 where a = 90;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t7 where a > 90;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t7 where a >= 90;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t7 where a > 11 and a < 29;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
create table t8 (a date not null) partition by RANGE(YEAR(a)) (
partition p0 values less than (1980),
partition p1 values less than (1990),
diff --git a/mysql-test/r/partition_range.result b/mysql-test/r/partition_range.result
index 3cbb517053a..9812c80040b 100644
--- a/mysql-test/r/partition_range.result
+++ b/mysql-test/r/partition_range.result
@@ -1,4 +1,82 @@
drop table if exists t1;
+create table t1 (a int unsigned)
+partition by range (a)
+(partition pnull values less than (0),
+partition p0 values less than (1),
+partition p1 values less than(2));
+insert into t1 values (null),(0),(1);
+select * from t1 where a is null;
+a
+NULL
+select * from t1 where a >= 0;
+a
+0
+1
+select * from t1 where a < 0;
+a
+select * from t1 where a <= 0;
+a
+0
+select * from t1 where a > 1;
+a
+explain partitions select * from t1 where a is null;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pnull system NULL NULL NULL NULL 1
+explain partitions select * from t1 where a >= 0;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0,p1 ALL NULL NULL NULL NULL 2 Using where
+explain partitions select * from t1 where a < 0;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+explain partitions select * from t1 where a <= 0;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pnull,p0 ALL NULL NULL NULL NULL 2 Using where
+explain partitions select * from t1 where a > 1;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+drop table t1;
+create table t1 (a int unsigned, b int unsigned)
+partition by range (a)
+subpartition by hash (b)
+subpartitions 2
+(partition pnull values less than (0),
+partition p0 values less than (1),
+partition p1 values less than(2));
+insert into t1 values (null,0),(null,1),(0,0),(0,1),(1,0),(1,1);
+select * from t1 where a is null;
+a b
+NULL 0
+NULL 1
+select * from t1 where a >= 0;
+a b
+0 0
+0 1
+1 0
+1 1
+select * from t1 where a < 0;
+a b
+select * from t1 where a <= 0;
+a b
+0 0
+0 1
+select * from t1 where a > 1;
+a b
+explain partitions select * from t1 where a is null;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pnull_pnullsp0,pnull_pnullsp1 ALL NULL NULL NULL NULL 2 Using where
+explain partitions select * from t1 where a >= 0;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0_p0sp0,p0_p0sp1,p1_p1sp0,p1_p1sp1 ALL NULL NULL NULL NULL 4 Using where
+explain partitions select * from t1 where a < 0;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pnull_pnullsp0,pnull_pnullsp1 ALL NULL NULL NULL NULL 2 Using where
+explain partitions select * from t1 where a <= 0;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pnull_pnullsp0,pnull_pnullsp1,p0_p0sp0,p0_p0sp1 ALL NULL NULL NULL NULL 4 Using where
+explain partitions select * from t1 where a > 1;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1_p1sp0,p1_p1sp1 ALL NULL NULL NULL NULL 2 Using where
+drop table t1;
CREATE TABLE t1 (
a int not null,
b int not null,
@@ -519,3 +597,115 @@ partition p3 values less than (1998),
partition p4 values less than (1999),
partition p5 values less than (2000));
drop table t1;
+CREATE TABLE t1 (a date)
+PARTITION BY RANGE (TO_DAYS(a))
+(PARTITION p3xx VALUES LESS THAN (TO_DAYS('2004-01-01')),
+PARTITION p401 VALUES LESS THAN (TO_DAYS('2004-02-01')),
+PARTITION p402 VALUES LESS THAN (TO_DAYS('2004-03-01')),
+PARTITION p403 VALUES LESS THAN (TO_DAYS('2004-04-01')),
+PARTITION p404 VALUES LESS THAN (TO_DAYS('2004-05-01')),
+PARTITION p405 VALUES LESS THAN (TO_DAYS('2004-06-01')),
+PARTITION p406 VALUES LESS THAN (TO_DAYS('2004-07-01')),
+PARTITION p407 VALUES LESS THAN (TO_DAYS('2004-08-01')),
+PARTITION p408 VALUES LESS THAN (TO_DAYS('2004-09-01')),
+PARTITION p409 VALUES LESS THAN (TO_DAYS('2004-10-01')),
+PARTITION p410 VALUES LESS THAN (TO_DAYS('2004-11-01')),
+PARTITION p411 VALUES LESS THAN (TO_DAYS('2004-12-01')),
+PARTITION p412 VALUES LESS THAN (TO_DAYS('2005-01-01')),
+PARTITION p501 VALUES LESS THAN (TO_DAYS('2005-02-01')),
+PARTITION p502 VALUES LESS THAN (TO_DAYS('2005-03-01')),
+PARTITION p503 VALUES LESS THAN (TO_DAYS('2005-04-01')),
+PARTITION p504 VALUES LESS THAN (TO_DAYS('2005-05-01')),
+PARTITION p505 VALUES LESS THAN (TO_DAYS('2005-06-01')),
+PARTITION p506 VALUES LESS THAN (TO_DAYS('2005-07-01')),
+PARTITION p507 VALUES LESS THAN (TO_DAYS('2005-08-01')),
+PARTITION p508 VALUES LESS THAN (TO_DAYS('2005-09-01')),
+PARTITION p509 VALUES LESS THAN (TO_DAYS('2005-10-01')),
+PARTITION p510 VALUES LESS THAN (TO_DAYS('2005-11-01')),
+PARTITION p511 VALUES LESS THAN (TO_DAYS('2005-12-01')),
+PARTITION p512 VALUES LESS THAN (TO_DAYS('2006-01-01')),
+PARTITION p601 VALUES LESS THAN (TO_DAYS('2006-02-01')),
+PARTITION p602 VALUES LESS THAN (TO_DAYS('2006-03-01')),
+PARTITION p603 VALUES LESS THAN (TO_DAYS('2006-04-01')),
+PARTITION p604 VALUES LESS THAN (TO_DAYS('2006-05-01')),
+PARTITION p605 VALUES LESS THAN (TO_DAYS('2006-06-01')),
+PARTITION p606 VALUES LESS THAN (TO_DAYS('2006-07-01')),
+PARTITION p607 VALUES LESS THAN (TO_DAYS('2006-08-01')));
+INSERT INTO t1 VALUES ('2003-01-13'),('2003-06-20'),('2003-08-30');
+INSERT INTO t1 VALUES ('2003-04-13'),('2003-07-20'),('2003-10-30');
+INSERT INTO t1 VALUES ('2003-05-13'),('2003-11-20'),('2003-12-30');
+INSERT INTO t1 VALUES ('2004-01-13'),('2004-01-20'),('2004-01-30');
+INSERT INTO t1 VALUES ('2004-02-13'),('2004-02-20'),('2004-02-28');
+INSERT INTO t1 VALUES ('2004-03-13'),('2004-03-20'),('2004-03-30');
+INSERT INTO t1 VALUES ('2004-04-13'),('2004-04-20'),('2004-04-30');
+INSERT INTO t1 VALUES ('2004-05-13'),('2004-05-20'),('2004-05-30');
+INSERT INTO t1 VALUES ('2004-06-13'),('2004-06-20'),('2004-06-30');
+INSERT INTO t1 VALUES ('2004-07-13'),('2004-07-20'),('2004-07-30');
+INSERT INTO t1 VALUES ('2004-08-13'),('2004-08-20'),('2004-08-30');
+INSERT INTO t1 VALUES ('2004-09-13'),('2004-09-20'),('2004-09-30');
+INSERT INTO t1 VALUES ('2004-10-13'),('2004-10-20'),('2004-10-30');
+INSERT INTO t1 VALUES ('2004-11-13'),('2004-11-20'),('2004-11-30');
+INSERT INTO t1 VALUES ('2004-12-13'),('2004-12-20'),('2004-12-30');
+INSERT INTO t1 VALUES ('2005-01-13'),('2005-01-20'),('2005-01-30');
+INSERT INTO t1 VALUES ('2005-02-13'),('2005-02-20'),('2005-02-28');
+INSERT INTO t1 VALUES ('2005-03-13'),('2005-03-20'),('2005-03-30');
+INSERT INTO t1 VALUES ('2005-04-13'),('2005-04-20'),('2005-04-30');
+INSERT INTO t1 VALUES ('2005-05-13'),('2005-05-20'),('2005-05-30');
+INSERT INTO t1 VALUES ('2005-06-13'),('2005-06-20'),('2005-06-30');
+INSERT INTO t1 VALUES ('2005-07-13'),('2005-07-20'),('2005-07-30');
+INSERT INTO t1 VALUES ('2005-08-13'),('2005-08-20'),('2005-08-30');
+INSERT INTO t1 VALUES ('2005-09-13'),('2005-09-20'),('2005-09-30');
+INSERT INTO t1 VALUES ('2005-10-13'),('2005-10-20'),('2005-10-30');
+INSERT INTO t1 VALUES ('2005-11-13'),('2005-11-20'),('2005-11-30');
+INSERT INTO t1 VALUES ('2005-12-13'),('2005-12-20'),('2005-12-30');
+INSERT INTO t1 VALUES ('2006-01-13'),('2006-01-20'),('2006-01-30');
+INSERT INTO t1 VALUES ('2006-02-13'),('2006-02-20'),('2006-02-28');
+INSERT INTO t1 VALUES ('2006-03-13'),('2006-03-20'),('2006-03-30');
+INSERT INTO t1 VALUES ('2006-04-13'),('2006-04-20'),('2006-04-30');
+INSERT INTO t1 VALUES ('2006-05-13'),('2006-05-20'),('2006-05-30');
+INSERT INTO t1 VALUES ('2006-06-13'),('2006-06-20'),('2006-06-30');
+INSERT INTO t1 VALUES ('2006-07-13'),('2006-07-20'),('2006-07-30');
+SELECT * FROM t1
+WHERE a >= '2004-07-01' AND a <= '2004-09-30';
+a
+2004-07-13
+2004-07-20
+2004-07-30
+2004-08-13
+2004-08-20
+2004-08-30
+2004-09-13
+2004-09-20
+2004-09-30
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE a >= '2004-07-01' AND a <= '2004-09-30';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p407,p408,p409 ALL NULL NULL NULL NULL 9 Using where
+SELECT * from t1
+WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
+(a >= '2005-07-01' AND a <= '2005-09-30');
+a
+2004-07-13
+2004-07-20
+2004-07-30
+2004-08-13
+2004-08-20
+2004-08-30
+2004-09-13
+2004-09-20
+2004-09-30
+2005-07-13
+2005-07-20
+2005-07-30
+2005-08-13
+2005-08-20
+2005-08-30
+2005-09-13
+2005-09-20
+2005-09-30
+EXPLAIN PARTITIONS SELECT * from t1
+WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
+(a >= '2005-07-01' AND a <= '2005-09-30');
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p407,p408,p409,p507,p508,p509 ALL NULL NULL NULL NULL 18 Using where
+DROP TABLE t1;
diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result
index 3e3981c0050..b2f5cec896f 100644
--- a/mysql-test/r/ps.result
+++ b/mysql-test/r/ps.result
@@ -485,6 +485,20 @@ execute stmt;
pnum
deallocate prepare stmt;
drop table t1, t2;
+drop table if exists t1;
+create temporary table if not exists t1 (a1 int);
+prepare stmt from "delete t1 from t1 where (cast(a1/3 as unsigned) * 3) = a1";
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+execute stmt;
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+execute stmt;
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+execute stmt;
+drop temporary table t1;
+deallocate prepare stmt;
create table t1 (a varchar(20));
insert into t1 values ('foo');
prepare stmt FROM 'SELECT char_length (a) FROM t1';
@@ -1158,3 +1172,108 @@ Warnings:
Error 1146 Table 'test.t4' doesn't exist
deallocate prepare stmt;
drop table t1, t2, t3;
+create database mysqltest_long_database_name_to_thrash_heap;
+use test;
+create table t1 (i int);
+prepare stmt from "alter table test.t1 rename t1";
+use mysqltest_long_database_name_to_thrash_heap;
+execute stmt;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+prepare stmt from "alter table test.t1 rename t1";
+use test;
+execute stmt;
+show tables like 't1';
+Tables_in_test (t1)
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+t1
+deallocate prepare stmt;
+use mysqltest_long_database_name_to_thrash_heap;
+prepare stmt_create from "create table t1 (i int)";
+prepare stmt_insert from "insert into t1 (i) values (1)";
+prepare stmt_update from "update t1 set i=2";
+prepare stmt_delete from "delete from t1 where i=2";
+prepare stmt_select from "select * from t1";
+prepare stmt_alter from "alter table t1 add column (b int)";
+prepare stmt_alter1 from "alter table t1 drop column b";
+prepare stmt_analyze from "analyze table t1";
+prepare stmt_optimize from "optimize table t1";
+prepare stmt_show from "show tables like 't1'";
+prepare stmt_truncate from "truncate table t1";
+prepare stmt_drop from "drop table t1";
+drop table t1;
+use test;
+execute stmt_create;
+show tables like 't1';
+Tables_in_test (t1)
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+t1
+use test;
+execute stmt_insert;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+i
+1
+execute stmt_update;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+i
+2
+execute stmt_delete;
+execute stmt_select;
+i
+execute stmt_alter;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+Field Type Null Key Default Extra
+i int(11) YES NULL
+b int(11) YES NULL
+execute stmt_alter1;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+Field Type Null Key Default Extra
+i int(11) YES NULL
+execute stmt_analyze;
+Table Op Msg_type Msg_text
+mysqltest_long_database_name_to_thrash_heap.t1 analyze status Table is already up to date
+execute stmt_optimize;
+Table Op Msg_type Msg_text
+mysqltest_long_database_name_to_thrash_heap.t1 optimize status Table is already up to date
+execute stmt_show;
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+t1
+execute stmt_truncate;
+execute stmt_drop;
+show tables like 't1';
+Tables_in_test (t1)
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+drop database mysqltest_long_database_name_to_thrash_heap;
+prepare stmt_create from "create table t1 (i int)";
+ERROR 3D000: No database selected
+prepare stmt_insert from "insert into t1 (i) values (1)";
+ERROR 3D000: No database selected
+prepare stmt_update from "update t1 set i=2";
+ERROR 3D000: No database selected
+prepare stmt_delete from "delete from t1 where i=2";
+ERROR 3D000: No database selected
+prepare stmt_select from "select * from t1";
+ERROR 3D000: No database selected
+prepare stmt_alter from "alter table t1 add column (b int)";
+ERROR 3D000: No database selected
+prepare stmt_alter1 from "alter table t1 drop column b";
+ERROR 3D000: No database selected
+prepare stmt_analyze from "analyze table t1";
+ERROR 3D000: No database selected
+prepare stmt_optimize from "optimize table t1";
+ERROR 3D000: No database selected
+prepare stmt_show from "show tables like 't1'";
+ERROR 3D000: No database selected
+prepare stmt_truncate from "truncate table t1";
+ERROR 3D000: No database selected
+prepare stmt_drop from "drop table t1";
+ERROR 3D000: No database selected
+create temporary table t1 (i int);
+ERROR 3D000: No database selected
+use test;
diff --git a/mysql-test/r/ps_2myisam.result b/mysql-test/r/ps_2myisam.result
index 5ba601bf305..d42693bdb95 100644
--- a/mysql-test/r/ps_2myisam.result
+++ b/mysql-test/r/ps_2myisam.result
@@ -1775,7 +1775,7 @@ NULL as const12, @arg12 as param12,
show create table t5 ;
Table Create Table
t5 CREATE TABLE `t5` (
- `const01` bigint(1) NOT NULL DEFAULT '0',
+ `const01` int(1) NOT NULL DEFAULT '0',
`param01` bigint(20) DEFAULT NULL,
`const02` decimal(2,1) NOT NULL DEFAULT '0.0',
`param02` decimal(65,30) DEFAULT NULL,
@@ -1805,7 +1805,7 @@ t5 CREATE TABLE `t5` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
select * from t5 ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def test t5 t5 const01 const01 8 1 1 N 32769 0 63
+def test t5 t5 const01 const01 3 1 1 N 32769 0 63
def test t5 t5 param01 param01 8 20 1 Y 32768 0 63
def test t5 t5 const02 const02 246 4 3 N 1 1 63
def test t5 t5 param02 param02 246 67 32 Y 0 30 63
diff --git a/mysql-test/r/ps_3innodb.result b/mysql-test/r/ps_3innodb.result
index 836fb7b58c2..9bc66a00a34 100644
--- a/mysql-test/r/ps_3innodb.result
+++ b/mysql-test/r/ps_3innodb.result
@@ -1758,7 +1758,7 @@ NULL as const12, @arg12 as param12,
show create table t5 ;
Table Create Table
t5 CREATE TABLE `t5` (
- `const01` bigint(1) NOT NULL DEFAULT '0',
+ `const01` int(1) NOT NULL DEFAULT '0',
`param01` bigint(20) DEFAULT NULL,
`const02` decimal(2,1) NOT NULL DEFAULT '0.0',
`param02` decimal(65,30) DEFAULT NULL,
@@ -1788,7 +1788,7 @@ t5 CREATE TABLE `t5` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
select * from t5 ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def test t5 t5 const01 const01 8 1 1 N 32769 0 63
+def test t5 t5 const01 const01 3 1 1 N 32769 0 63
def test t5 t5 param01 param01 8 20 1 Y 32768 0 63
def test t5 t5 const02 const02 246 4 3 N 1 1 63
def test t5 t5 param02 param02 246 67 32 Y 0 30 63
diff --git a/mysql-test/r/ps_4heap.result b/mysql-test/r/ps_4heap.result
index 150820d92f0..370065351d1 100644
--- a/mysql-test/r/ps_4heap.result
+++ b/mysql-test/r/ps_4heap.result
@@ -1759,7 +1759,7 @@ NULL as const12, @arg12 as param12,
show create table t5 ;
Table Create Table
t5 CREATE TABLE `t5` (
- `const01` bigint(1) NOT NULL DEFAULT '0',
+ `const01` int(1) NOT NULL DEFAULT '0',
`param01` bigint(20) DEFAULT NULL,
`const02` decimal(2,1) NOT NULL DEFAULT '0.0',
`param02` decimal(65,30) DEFAULT NULL,
@@ -1789,7 +1789,7 @@ t5 CREATE TABLE `t5` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
select * from t5 ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def test t5 t5 const01 const01 8 1 1 N 32769 0 63
+def test t5 t5 const01 const01 3 1 1 N 32769 0 63
def test t5 t5 param01 param01 8 20 1 Y 32768 0 63
def test t5 t5 const02 const02 246 4 3 N 1 1 63
def test t5 t5 param02 param02 246 67 32 Y 0 30 63
diff --git a/mysql-test/r/ps_5merge.result b/mysql-test/r/ps_5merge.result
index 72573ce3294..6a304787ffb 100644
--- a/mysql-test/r/ps_5merge.result
+++ b/mysql-test/r/ps_5merge.result
@@ -1695,7 +1695,7 @@ NULL as const12, @arg12 as param12,
show create table t5 ;
Table Create Table
t5 CREATE TABLE `t5` (
- `const01` bigint(1) NOT NULL DEFAULT '0',
+ `const01` int(1) NOT NULL DEFAULT '0',
`param01` bigint(20) DEFAULT NULL,
`const02` decimal(2,1) NOT NULL DEFAULT '0.0',
`param02` decimal(65,30) DEFAULT NULL,
@@ -1725,7 +1725,7 @@ t5 CREATE TABLE `t5` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
select * from t5 ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def test t5 t5 const01 const01 8 1 1 N 32769 0 63
+def test t5 t5 const01 const01 3 1 1 N 32769 0 63
def test t5 t5 param01 param01 8 20 1 Y 32768 0 63
def test t5 t5 const02 const02 246 4 3 N 1 1 63
def test t5 t5 param02 param02 246 67 32 Y 0 30 63
@@ -4709,7 +4709,7 @@ NULL as const12, @arg12 as param12,
show create table t5 ;
Table Create Table
t5 CREATE TABLE `t5` (
- `const01` bigint(1) NOT NULL DEFAULT '0',
+ `const01` int(1) NOT NULL DEFAULT '0',
`param01` bigint(20) DEFAULT NULL,
`const02` decimal(2,1) NOT NULL DEFAULT '0.0',
`param02` decimal(65,30) DEFAULT NULL,
@@ -4739,7 +4739,7 @@ t5 CREATE TABLE `t5` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
select * from t5 ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def test t5 t5 const01 const01 8 1 1 N 32769 0 63
+def test t5 t5 const01 const01 3 1 1 N 32769 0 63
def test t5 t5 param01 param01 8 20 1 Y 32768 0 63
def test t5 t5 const02 const02 246 4 3 N 1 1 63
def test t5 t5 param02 param02 246 67 32 Y 0 30 63
diff --git a/mysql-test/r/ps_6bdb.result b/mysql-test/r/ps_6bdb.result
index 44b4ebf33d4..94e6da6ccd1 100644
--- a/mysql-test/r/ps_6bdb.result
+++ b/mysql-test/r/ps_6bdb.result
@@ -1758,7 +1758,7 @@ NULL as const12, @arg12 as param12,
show create table t5 ;
Table Create Table
t5 CREATE TABLE `t5` (
- `const01` bigint(1) NOT NULL DEFAULT '0',
+ `const01` int(1) NOT NULL DEFAULT '0',
`param01` bigint(20) DEFAULT NULL,
`const02` decimal(2,1) NOT NULL DEFAULT '0.0',
`param02` decimal(65,30) DEFAULT NULL,
@@ -1788,7 +1788,7 @@ t5 CREATE TABLE `t5` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
select * from t5 ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def test t5 t5 const01 const01 8 1 1 N 32769 0 63
+def test t5 t5 const01 const01 3 1 1 N 32769 0 63
def test t5 t5 param01 param01 8 20 1 Y 32768 0 63
def test t5 t5 const02 const02 246 4 3 N 1 1 63
def test t5 t5 param02 param02 246 67 32 Y 0 30 63
diff --git a/mysql-test/r/ps_7ndb.result b/mysql-test/r/ps_7ndb.result
index 543435e4cd9..772848dcf38 100644
--- a/mysql-test/r/ps_7ndb.result
+++ b/mysql-test/r/ps_7ndb.result
@@ -1758,7 +1758,7 @@ NULL as const12, @arg12 as param12,
show create table t5 ;
Table Create Table
t5 CREATE TABLE `t5` (
- `const01` bigint(1) NOT NULL default '0',
+ `const01` int(1) NOT NULL default '0',
`param01` bigint(20) default NULL,
`const02` decimal(2,1) NOT NULL default '0.0',
`param02` decimal(65,30) default NULL,
@@ -1788,7 +1788,7 @@ t5 CREATE TABLE `t5` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
select * from t5 ;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def test t5 t5 const01 const01 8 1 1 N 32769 0 63
+def test t5 t5 const01 const01 3 1 1 N 32769 0 63
def test t5 t5 param01 param01 8 20 1 Y 32768 0 63
def test t5 t5 const02 const02 246 4 3 N 1 1 63
def test t5 t5 param02 param02 246 67 32 Y 0 30 63
diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result
index 383b1b8ec15..703d92979f1 100644
--- a/mysql-test/r/query_cache.result
+++ b/mysql-test/r/query_cache.result
@@ -233,7 +233,7 @@ explain extended select benchmark(1,1) from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 system NULL NULL NULL NULL 0 0.00 const row not found
Warnings:
-Note 1003 select sql_no_cache benchmark(1,1) AS `benchmark(1,1)` from `test`.`t1`
+Note 1003 select benchmark(1,1) AS `benchmark(1,1)` from `test`.`t1`
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result
index f687ab7e2c6..a7e7d3aff2c 100644
--- a/mysql-test/r/range.result
+++ b/mysql-test/r/range.result
@@ -644,6 +644,27 @@ SELECT count(*) FROM t1 WHERE CLIENT='000' AND (ARG1 != ' 2' OR ARG1 != ' 1');
count(*)
4
drop table t1;
+create table t1 (a int);
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+CREATE TABLE t2 (
+pk1 int(11) NOT NULL,
+pk2 int(11) NOT NULL,
+pk3 int(11) NOT NULL,
+pk4 int(11) NOT NULL,
+filler char(82),
+PRIMARY KEY (pk1,pk2,pk3,pk4)
+) DEFAULT CHARSET=latin1;
+insert into t2 select 1, A.a+10*B.a, 432, 44, 'fillerZ' from t1 A, t1 B;
+INSERT INTO t2 VALUES (2621, 2635, 0, 0,'filler'), (2621, 2635, 1, 0,'filler'),
+(2621, 2635, 10, 0,'filler'), (2621, 2635, 11, 0,'filler'),
+(2621, 2635, 14, 0,'filler'), (2621, 2635, 1000015, 0,'filler');
+SELECT * FROM t2
+WHERE ((((pk4 =0) AND (pk1 =2621) AND (pk2 =2635)))
+OR ((pk4 =1) AND (((pk1 IN ( 7, 2, 1 ))) OR (pk1 =522)) AND ((pk2 IN ( 0, 2635))))
+) AND (pk3 >=1000000);
+pk1 pk2 pk3 pk4 filler
+2621 2635 1000015 0 filler
+drop table t1, t2;
CREATE TABLE t1 (
id int(11) NOT NULL auto_increment,
status varchar(20),
diff --git a/mysql-test/r/rpl_auto_increment.result b/mysql-test/r/rpl_auto_increment.result
index 9984ccf51f3..083f3a4e901 100644
--- a/mysql-test/r/rpl_auto_increment.result
+++ b/mysql-test/r/rpl_auto_increment.result
@@ -183,3 +183,47 @@ a
32
42
drop table t1;
+create table t1 (a tinyint not null auto_increment primary key) engine=myisam;
+insert into t1 values(103);
+set auto_increment_increment=11;
+set auto_increment_offset=4;
+insert into t1 values(null);
+insert into t1 values(null);
+insert into t1 values(null);
+ERROR 23000: Duplicate entry '125' for key 'PRIMARY'
+select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t1 order by a;
+a mod(a-@@auto_increment_offset,@@auto_increment_increment)
+103 0
+114 0
+125 0
+create table t2 (a tinyint unsigned not null auto_increment primary key) engine=myisam;
+set auto_increment_increment=10;
+set auto_increment_offset=1;
+set insert_id=1000;
+insert into t2 values(null);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t2 order by a;
+a mod(a-@@auto_increment_offset,@@auto_increment_increment)
+251 0
+create table t3 like t1;
+set auto_increment_increment=1000;
+set auto_increment_offset=700;
+insert into t3 values(null);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+select * from t3 order by a;
+a
+127
+select * from t1 order by a;
+a
+103
+114
+125
+select * from t2 order by a;
+a
+251
+select * from t3 order by a;
+a
+127
+drop table t1,t2,t3;
diff --git a/mysql-test/r/rpl_drop_db.result b/mysql-test/r/rpl_drop_db.result
index 3a35dc266b6..51d6d71ca95 100644
--- a/mysql-test/r/rpl_drop_db.result
+++ b/mysql-test/r/rpl_drop_db.result
@@ -33,3 +33,5 @@ n
1234
DROP DATABASE mysqltest1;
stop slave;
+use test;
+drop table t1;
diff --git a/mysql-test/r/rpl_get_lock.result b/mysql-test/r/rpl_get_lock.result
index dce5f7182a8..235640acad0 100644
--- a/mysql-test/r/rpl_get_lock.result
+++ b/mysql-test/r/rpl_get_lock.result
@@ -25,7 +25,7 @@ explain extended select is_free_lock("lock"), is_used_lock("lock");
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache is_free_lock(_latin1'lock') AS `is_free_lock("lock")`,is_used_lock(_latin1'lock') AS `is_used_lock("lock")`
+Note 1003 select is_free_lock(_latin1'lock') AS `is_free_lock("lock")`,is_used_lock(_latin1'lock') AS `is_used_lock("lock")`
select is_free_lock("lock2");
is_free_lock("lock2")
1
diff --git a/mysql-test/r/rpl_insert.result b/mysql-test/r/rpl_insert.result
new file mode 100644
index 00000000000..bcc9b176ed3
--- /dev/null
+++ b/mysql-test/r/rpl_insert.result
@@ -0,0 +1,16 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+CREATE SCHEMA IF NOT EXISTS mysqlslap;
+USE mysqlslap;
+CREATE TABLE t1 (id INT, name VARCHAR(64));
+SELECT COUNT(*) FROM mysqlslap.t1;
+COUNT(*)
+5000
+SELECT COUNT(*) FROM mysqlslap.t1;
+COUNT(*)
+5000
+DROP SCHEMA IF EXISTS mysqlslap;
diff --git a/mysql-test/r/rpl_insert_id.result b/mysql-test/r/rpl_insert_id.result
index 622b1489f91..3c33fe1be2b 100644
--- a/mysql-test/r/rpl_insert_id.result
+++ b/mysql-test/r/rpl_insert_id.result
@@ -73,6 +73,20 @@ CREATE TABLE t1 ( a INT UNIQUE );
SET FOREIGN_KEY_CHECKS=0;
INSERT INTO t1 VALUES (1),(1);
Got one of the listed errors
+drop table t1;
+create table t1(a int auto_increment, key(a));
+create table t2(a int);
+insert into t1 (a) values (null);
+insert into t2 (a) select a from t1 where a is null;
+insert into t2 (a) select a from t1 where a is null;
+select * from t2;
+a
+1
+select * from t2;
+a
+1
+drop table t1;
+drop table t2;
drop function if exists bug15728;
drop function if exists bug15728_insert;
drop table if exists t1, t2;
@@ -117,6 +131,14 @@ insert into t1 (last_id) values (bug15728());
select last_insert_id();
last_insert_id()
5
+drop procedure if exists foo;
+create procedure foo()
+begin
+declare res int;
+insert into t2 (last_id) values (bug15728());
+insert into t1 (last_id) values (bug15728());
+end|
+call foo();
select * from t1;
id last_id
1 0
@@ -124,10 +146,126 @@ id last_id
3 2
4 1
5 4
+6 3
select * from t2;
id last_id
1 3
2 4
+3 5
+select * from t1;
+id last_id
+1 0
+2 1
+3 2
+4 1
+5 4
+6 3
+select * from t2;
+id last_id
+1 3
+2 4
+3 5
drop function bug15728;
drop function bug15728_insert;
+drop table t1;
+drop procedure foo;
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+set sql_log_bin=0;
+insert into t1 values(null,100);
+replace into t1 values(null,50),(null,100),(null,150);
+select * from t1 order by n;
+n b
+2 50
+3 100
+4 150
+truncate table t1;
+set sql_log_bin=1;
+insert into t1 values(null,100);
+select * from t1 order by n;
+n b
+1 100
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+select * from t1 order by n;
+n b
+1 100
+replace into t1 values(null,100),(null,350);
+select * from t1 order by n;
+n b
+2 100
+3 350
+select * from t1 order by n;
+n b
+2 100
+3 350
+insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
+select * from t1 order by n;
+n b
+2 100
+4 400
+1000 350
+1001 600
+select * from t1 order by n;
+n b
+2 100
+4 400
+1000 350
+1001 600
+drop table t1;
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+insert into t1 values(null,100);
+select * from t1 order by n;
+n b
+1 100
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+select * from t1 order by n;
+n b
+1 100
+insert into t1 values(null,100),(null,350) on duplicate key update n=2;
+select * from t1 order by n;
+n b
+2 100
+3 350
+select * from t1 order by n;
+n b
+2 100
+3 350
+drop table t1;
+truncate table t2;
+create table t1 (id tinyint primary key);
+create function insid() returns int
+begin
+insert into t2 (last_id) values (0);
+return 0;
+end|
+set sql_log_bin=0;
+insert into t2 (id) values(1),(2),(3);
+delete from t2;
+set sql_log_bin=1;
+select insid();
+insid()
+0
+set sql_log_bin=0;
+insert into t2 (id) values(5),(6),(7);
+delete from t2 where id>=5;
+set sql_log_bin=1;
+insert into t1 select insid();
+select * from t1;
+id
+0
+select * from t2;
+id last_id
+4 0
+8 0
+select * from t1;
+id
+0
+select * from t2;
+id last_id
+4 0
+8 0
drop table t1, t2;
+drop function insid;
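For reference, a short sketch of the LAST_INSERT_ID() behaviour these replicated results rely on (not part of the diff; t_demo is a hypothetical table): the function reports the first value generated by the most recent insert that produced auto-increment values, which is also the value statement-based replication passes to the slave.

CREATE TABLE t_demo (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, tag VARCHAR(10));
INSERT INTO t_demo (tag) VALUES ('a');               -- generates id 1
SELECT LAST_INSERT_ID();                             -- 1
INSERT INTO t_demo (tag) VALUES ('b'), ('c'), ('d'); -- generates ids 2, 3, 4
SELECT LAST_INSERT_ID();                             -- 2: first id of the multi-row insert
DROP TABLE t_demo;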
diff --git a/mysql-test/r/rpl_insert_id_pk.result b/mysql-test/r/rpl_insert_id_pk.result
index 0a452d2dd73..79815355332 100644
--- a/mysql-test/r/rpl_insert_id_pk.result
+++ b/mysql-test/r/rpl_insert_id_pk.result
@@ -73,3 +73,4 @@ CREATE TABLE t1 ( a INT UNIQUE );
SET FOREIGN_KEY_CHECKS=0;
INSERT INTO t1 VALUES (1),(1);
Got one of the listed errors
+drop table t1;
diff --git a/mysql-test/r/rpl_loaddata.result b/mysql-test/r/rpl_loaddata.result
index 47e056429ce..c22815186d1 100644
--- a/mysql-test/r/rpl_loaddata.result
+++ b/mysql-test/r/rpl_loaddata.result
@@ -5,8 +5,14 @@ reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
reset master;
+select last_insert_id();
+last_insert_id()
+0
create table t1(a int not null auto_increment, b int, primary key(a) );
load data infile '../std_data_ln/rpl_loaddata.dat' into table t1;
+select last_insert_id();
+last_insert_id()
+1
create temporary table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60));
load data infile '../std_data_ln/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines;
create table t3 (day date,id int(9),category enum('a','b','c'),name varchar(60));
@@ -22,7 +28,7 @@ day id category name
2003-03-22 2416 a bbbbb
show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB
-slave-bin.000001 1276
+slave-bin.000001 1248
drop table t1;
drop table t2;
drop table t3;
@@ -33,7 +39,7 @@ set global sql_slave_skip_counter=1;
start slave;
show slave status;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
-# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1793 # # master-bin.000001 Yes Yes # 0 0 1793 # None 0 No #
+# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1765 # # master-bin.000001 Yes Yes # 0 0 1765 # None 0 No #
set sql_log_bin=0;
delete from t1;
set sql_log_bin=1;
@@ -43,7 +49,7 @@ change master to master_user='test';
change master to master_user='root';
show slave status;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
-# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1828 # # master-bin.000001 No No # 0 0 1828 # None 0 No #
+# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 1800 # # master-bin.000001 No No # 0 0 1800 # None 0 No #
set global sql_slave_skip_counter=1;
start slave;
set sql_log_bin=0;
@@ -79,3 +85,4 @@ terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by
ERROR 23000: Duplicate entry '2003-03-22' for key 'day'
drop table t2;
drop table t2;
+drop table t1;
diff --git a/mysql-test/r/rpl_master_pos_wait.result b/mysql-test/r/rpl_master_pos_wait.result
index 7239d28c98f..81d9043c8ce 100644
--- a/mysql-test/r/rpl_master_pos_wait.result
+++ b/mysql-test/r/rpl_master_pos_wait.result
@@ -11,7 +11,7 @@ explain extended select master_pos_wait('master-bin.999999',0,2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache master_pos_wait(_latin1'master-bin.999999',0,2) AS `master_pos_wait('master-bin.999999',0,2)`
+Note 1003 select master_pos_wait(_latin1'master-bin.999999',0,2) AS `master_pos_wait('master-bin.999999',0,2)`
select master_pos_wait('master-bin.999999',0);
stop slave sql_thread;
master_pos_wait('master-bin.999999',0)
diff --git a/mysql-test/r/rpl_multi_update3.result b/mysql-test/r/rpl_multi_update3.result
index b81af7c6e39..f20cecfb68f 100644
--- a/mysql-test/r/rpl_multi_update3.result
+++ b/mysql-test/r/rpl_multi_update3.result
@@ -194,3 +194,4 @@ idpro price nbprice
1 1.0000 3
2 1.0000 2
3 2.0000 1
+DROP TABLE t1, t2;
diff --git a/mysql-test/r/rpl_ndb_auto_inc.result b/mysql-test/r/rpl_ndb_auto_inc.result
index 71217442698..dd4cc90a75f 100644
--- a/mysql-test/r/rpl_ndb_auto_inc.result
+++ b/mysql-test/r/rpl_ndb_auto_inc.result
@@ -71,8 +71,8 @@ a
250
251
400
+401
1000
-1001
******* Select from Slave *************
select * from t1 ORDER BY a;
@@ -83,8 +83,8 @@ a
250
251
400
+401
1000
-1001
drop table t1;
create table t1 (a int not null auto_increment, primary key (a)) engine=NDB;
insert into t1 values (NULL),(5),(NULL),(NULL);
@@ -120,8 +120,6 @@ a
502
503
600
-603
-604
610
611
******* Select from Slave *************
@@ -137,8 +135,6 @@ a
502
503
600
-603
-604
610
611
drop table t1;
diff --git a/mysql-test/r/rpl_ndb_multi_update3.result b/mysql-test/r/rpl_ndb_multi_update3.result
index 63ec20d8fc8..2bbbe7785a9 100644
--- a/mysql-test/r/rpl_ndb_multi_update3.result
+++ b/mysql-test/r/rpl_ndb_multi_update3.result
@@ -194,3 +194,4 @@ idpro price nbprice
1 1.0000 3
2 1.0000 2
3 2.0000 1
+DROP TABLE t1, t2;
diff --git a/mysql-test/r/rpl_ndb_sp006.result b/mysql-test/r/rpl_ndb_sp006.result
index 6d32f26b280..482d43c8f10 100644
--- a/mysql-test/r/rpl_ndb_sp006.result
+++ b/mysql-test/r/rpl_ndb_sp006.result
@@ -43,3 +43,4 @@ DROP PROCEDURE IF EXISTS mysqltest1.p1;
DROP PROCEDURE IF EXISTS mysqltest1.p2;
DROP TABLE IF EXISTS mysqltest1.t1;
DROP TABLE IF EXISTS mysqltest1.t2;
+DROP DATABASE mysqltest1;
diff --git a/mysql-test/r/rpl_row_create_table.result b/mysql-test/r/rpl_row_create_table.result
index f314aa39b81..03388f59b8c 100644
--- a/mysql-test/r/rpl_row_create_table.result
+++ b/mysql-test/r/rpl_row_create_table.result
@@ -137,11 +137,11 @@ a b
1 2
2 4
3 6
-SHOW BINLOG EVENTS FROM 1256;
+SHOW BINLOG EVENTS FROM 1118;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 1256 Query 1 1356 use `test`; CREATE TABLE t7 (a INT, b INT UNIQUE)
-master-bin.000001 1356 Table_map 1 1396 table_id: # (test.t7)
-master-bin.000001 1396 Write_rows 1 1452 table_id: # flags: STMT_END_F
+master-bin.000001 1118 Query 1 1218 use `test`; CREATE TABLE t7 (a INT, b INT UNIQUE)
+master-bin.000001 1218 Table_map 1 1258 table_id: # (test.t7)
+master-bin.000001 1258 Write_rows 1 1314 table_id: # flags: STMT_END_F
SELECT * FROM t7 ORDER BY a,b;
a b
1 2
@@ -154,10 +154,10 @@ INSERT INTO t7 SELECT a,b FROM tt4;
ROLLBACK;
Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
-SHOW BINLOG EVENTS FROM 1452;
+SHOW BINLOG EVENTS FROM 1314;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 1452 Table_map 1 1492 table_id: # (test.t7)
-master-bin.000001 1492 Write_rows 1 1548 table_id: # flags: STMT_END_F
+master-bin.000001 1314 Table_map 1 1354 table_id: # (test.t7)
+master-bin.000001 1354 Write_rows 1 1410 table_id: # flags: STMT_END_F
SELECT * FROM t7 ORDER BY a,b;
a b
1 2
@@ -178,6 +178,7 @@ CREATE TABLE t8 LIKE t4;
CREATE TABLE t9 LIKE tt4;
CREATE TEMPORARY TABLE tt5 LIKE t4;
CREATE TEMPORARY TABLE tt6 LIKE tt4;
+CREATE TEMPORARY TABLE tt7 SELECT 1;
**** On Master ****
SHOW CREATE TABLE t8;
Table t8
@@ -191,10 +192,10 @@ Create Table CREATE TABLE `t9` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
-SHOW BINLOG EVENTS FROM 1548;
+SHOW BINLOG EVENTS FROM 1410;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 1548 Query 1 1634 use `test`; CREATE TABLE t8 LIKE t4
-master-bin.000001 1634 Query 1 1773 use `test`; CREATE TABLE `t9` (
+master-bin.000001 1410 Query 1 1496 use `test`; CREATE TABLE t8 LIKE t4
+master-bin.000001 1496 Query 1 1635 use `test`; CREATE TABLE `t9` (
`a` int(11) DEFAULT NULL,
`b` int(11) DEFAULT NULL
)
diff --git a/mysql-test/r/rpl_row_delayed_ins.result b/mysql-test/r/rpl_row_delayed_ins.result
index 16001b96ac2..31fffeb59cc 100644
--- a/mysql-test/r/rpl_row_delayed_ins.result
+++ b/mysql-test/r/rpl_row_delayed_ins.result
@@ -17,8 +17,10 @@ Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4
master-bin.000001 102 Query 1 222 use `test`; create table t1(a int not null primary key) engine=myisam
master-bin.000001 222 Table_map 1 261 table_id: # (test.t1)
-master-bin.000001 261 Write_rows 1 305 table_id: # flags: STMT_END_F
-master-bin.000001 305 Query 1 380 use `test`; flush tables
+master-bin.000001 261 Write_rows 1 295 table_id: # flags: STMT_END_F
+master-bin.000001 295 Table_map 1 334 table_id: # (test.t1)
+master-bin.000001 334 Write_rows 1 373 table_id: # flags: STMT_END_F
+master-bin.000001 373 Query 1 448 use `test`; flush tables
SELECT * FROM t1 ORDER BY a;
a
1
diff --git a/mysql-test/r/rpl_row_sp006_InnoDB.result b/mysql-test/r/rpl_row_sp006_InnoDB.result
index 9b9b04dbbee..8339e77d3a0 100644
--- a/mysql-test/r/rpl_row_sp006_InnoDB.result
+++ b/mysql-test/r/rpl_row_sp006_InnoDB.result
@@ -43,3 +43,4 @@ DROP PROCEDURE IF EXISTS mysqltest1.p1;
DROP PROCEDURE IF EXISTS mysqltest1.p2;
DROP TABLE IF EXISTS mysqltest1.t1;
DROP TABLE IF EXISTS mysqltest1.t2;
+DROP DATABASE mysqltest1;
diff --git a/mysql-test/r/rpl_stm_no_op.result b/mysql-test/r/rpl_stm_no_op.result
index eb445085a1d..5a253d61fcb 100644
--- a/mysql-test/r/rpl_stm_no_op.result
+++ b/mysql-test/r/rpl_stm_no_op.result
@@ -49,4 +49,4 @@ select * from t1;
a b
select * from t2;
a b
-drop table t1;
+drop table t1, t2;
diff --git a/mysql-test/r/rpl_switch_stm_row_mixed.result b/mysql-test/r/rpl_switch_stm_row_mixed.result
index 313037bb9dc..c319005b2a4 100644
--- a/mysql-test/r/rpl_switch_stm_row_mixed.result
+++ b/mysql-test/r/rpl_switch_stm_row_mixed.result
@@ -18,18 +18,18 @@ select @@global.binlog_format, @@session.binlog_format;
ROW ROW
CREATE TABLE t1 (a varchar(100));
prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
-set @string="emergency";
-insert into t1 values("work");
+set @string="emergency_1_";
+insert into t1 values("work_2_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values(concat(UUID(),"work"));
+insert into t1 values(concat(UUID(),"work_3_"));
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values(concat("for",UUID()));
-insert into t1 select "yesterday";
-create temporary table tmp(a char(3));
-insert into tmp values("see");
+insert into t1 values(concat("for_4_",UUID()));
+insert into t1 select "yesterday_5_";
+create temporary table tmp(a char(100));
+insert into tmp values("see_6_");
set binlog_format=statement;
ERROR HY000: Cannot switch out of the row-based binary log format when the session has open temporary tables
insert into t1 select * from tmp;
@@ -55,16 +55,16 @@ select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format @@session.binlog_format
STATEMENT STATEMENT
prepare stmt1 from 'insert into t1 select ?';
-set @string="emergency";
-insert into t1 values("work");
+set @string="emergency_7_";
+insert into t1 values("work_8_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values("work");
+insert into t1 values("work_9_");
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values("for");
-insert into t1 select "yesterday";
+insert into t1 values("for_10_");
+insert into t1 select "yesterday_11_";
set binlog_format=default;
select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format @@session.binlog_format
@@ -75,16 +75,16 @@ select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format @@session.binlog_format
STATEMENT STATEMENT
prepare stmt1 from 'insert into t1 select ?';
-set @string="emergency";
-insert into t1 values("work");
+set @string="emergency_12_";
+insert into t1 values("work_13_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values("work");
+insert into t1 values("work_14_");
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values("for");
-insert into t1 select "yesterday";
+insert into t1 values("for_15_");
+insert into t1 select "yesterday_16_";
set binlog_format=mixed;
select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format @@session.binlog_format
@@ -94,40 +94,40 @@ select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format @@session.binlog_format
MIXED MIXED
prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
-set @string="emergency";
-insert into t1 values("work");
+set @string="emergency_17_";
+insert into t1 values("work_18_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values(concat(UUID(),"work"));
+insert into t1 values(concat(UUID(),"work_19_"));
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values(concat("for",UUID()));
-insert into t1 select "yesterday";
+insert into t1 values(concat("for_20_",UUID()));
+insert into t1 select "yesterday_21_";
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values(concat(UUID(),"work"));
+insert into t1 values(concat(UUID(),"work_22_"));
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values(concat("for",UUID()));
-insert into t1 select "yesterday";
-create table t2 select UUID();
+insert into t1 values(concat("for_23_",UUID()));
+insert into t1 select "yesterday_24_";
+create table t2 select rpad(UUID(),100,' ');
create table t3 select 1 union select UUID();
create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3);
create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
insert into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
create procedure foo()
begin
-insert into t1 values("work");
-insert into t1 values(concat("for",UUID()));
-insert into t1 select "yesterday";
+insert into t1 values("work_25_");
+insert into t1 values(concat("for_26_",UUID()));
+insert into t1 select "yesterday_27_";
end|
create procedure foo2()
begin
-insert into t1 values(concat("emergency",UUID()));
-insert into t1 values("work");
-insert into t1 values(concat("for",UUID()));
+insert into t1 values(concat("emergency_28_",UUID()));
+insert into t1 values("work_29_");
+insert into t1 values(concat("for_30_",UUID()));
set session binlog_format=row; # accepted for stored procs
-insert into t1 values("more work");
+insert into t1 values("more work_31_");
set session binlog_format=mixed;
end|
create function foo3() returns bigint unsigned
@@ -136,32 +136,231 @@ set session binlog_format=row; # rejected for stored funcs
insert into t1 values("alarm");
return 100;
end|
+create procedure foo4(x varchar(100))
+begin
+insert into t1 values(concat("work_250_",x));
+insert into t1 select "yesterday_270_";
+end|
call foo();
call foo2();
+call foo4("hello");
+call foo4(UUID());
+call foo4("world");
select foo3();
ERROR HY000: Cannot change the binary logging format inside a stored function or trigger
select * from t1 where a="alarm";
a
+drop function foo3;
+create function foo3() returns bigint unsigned
+begin
+insert into t1 values("foo3_32_");
+call foo();
+return 100;
+end|
+insert into t2 select foo3();
+prepare stmt1 from 'insert into t2 select foo3()';
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+create function foo4() returns bigint unsigned
+begin
+insert into t2 select foo3();
+return 100;
+end|
+select foo4();
+foo4()
+100
+prepare stmt1 from 'select foo4()';
+execute stmt1;
+foo4()
+100
+execute stmt1;
+foo4()
+100
+deallocate prepare stmt1;
+create function foo5() returns bigint unsigned
+begin
+insert into t2 select UUID();
+return 100;
+end|
+select foo5();
+foo5()
+100
+prepare stmt1 from 'select foo5()';
+execute stmt1;
+foo5()
+100
+execute stmt1;
+foo5()
+100
+deallocate prepare stmt1;
+create function foo6(x varchar(100)) returns bigint unsigned
+begin
+insert into t2 select x;
+return 100;
+end|
+select foo6("foo6_1_");
+foo6("foo6_1_")
+100
+select foo6(concat("foo6_2_",UUID()));
+foo6(concat("foo6_2_",UUID()))
+100
+prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))';
+execute stmt1;
+foo6(concat("foo6_3_",UUID()))
+100
+execute stmt1;
+foo6(concat("foo6_3_",UUID()))
+100
+deallocate prepare stmt1;
+create view v1 as select uuid();
+create table t11 (data varchar(255));
+insert into t11 select * from v1;
+insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11');
+prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')";
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+create trigger t11_bi before insert on t11 for each row
+begin
+set NEW.data = concat(NEW.data,UUID());
+end|
+insert into t11 values("try_560_");
+insert delayed into t2 values("delay_1_");
+insert delayed into t2 values(concat("delay_2_",UUID()));
+insert delayed into t2 values("delay_6_");
+insert delayed into t2 values(rand());
+set @a=2.345;
+insert delayed into t2 values(@a);
+create table t20 select * from t1;
+create table t21 select * from t2;
+create table t22 select * from t3;
+drop table t1,t2,t3;
+create table t1 (a int primary key auto_increment, b varchar(100));
+create table t2 (a int primary key auto_increment, b varchar(100));
+create table t3 (b varchar(100));
+create function f (x varchar(100)) returns int deterministic
+begin
+insert into t1 values(null,x);
+insert into t2 values(null,x);
+return 1;
+end|
+select f("try_41_");
+f("try_41_")
+1
+use mysqltest1;
+insert into t2 values(2,null),(3,null),(4,null);
+delete from t2 where a>=2;
+select f("try_42_");
+f("try_42_")
+1
+insert into t2 values(3,null),(4,null);
+delete from t2 where a>=3;
+prepare stmt1 from 'select f(?)';
+set @string="try_43_";
+insert into t1 values(null,"try_44_");
+execute stmt1 using @string;
+f(?)
+1
+deallocate prepare stmt1;
+create table t12 select * from t1;
+drop table t1;
+create table t1 (a int, b varchar(100), key(a));
+select f("try_45_");
+f("try_45_")
+1
+create table t13 select * from t1;
+drop table t1;
+create table t1 (a int primary key auto_increment, b varchar(100));
+drop function f;
+create table t14 (unique (a)) select * from t2;
+truncate table t2;
+create function f1 (x varchar(100)) returns int deterministic
+begin
+insert into t1 values(null,x);
+return 1;
+end|
+create function f2 (x varchar(100)) returns int deterministic
+begin
+insert into t2 values(null,x);
+return 1;
+end|
+select f1("try_46_"),f2("try_47_");
+f1("try_46_") f2("try_47_")
+1 1
+insert into t2 values(2,null),(3,null),(4,null);
+delete from t2 where a>=2;
+select f1("try_48_"),f2("try_49_");
+f1("try_48_") f2("try_49_")
+1 1
+insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_")));
+drop function f2;
+create function f2 (x varchar(100)) returns int deterministic
+begin
+declare y int;
+insert into t1 values(null,x);
+set y = (select count(*) from t2);
+return y;
+end|
+select f1("try_53_"),f2("try_54_");
+f1("try_53_") f2("try_54_")
+1 3
+drop function f2;
+create trigger t1_bi before insert on t1 for each row
+begin
+insert into t2 values(null,"try_55_");
+end|
+insert into t1 values(null,"try_56_");
+alter table t1 modify a int, drop primary key;
+insert into t1 values(null,"try_57_");
+CREATE TEMPORARY TABLE t15 SELECT UUID();
+create table t16 like t15;
+INSERT INTO t16 SELECT * FROM t15;
+insert into t16 values("try_65_");
+drop table t15;
+insert into t16 values("try_66_");
select count(*) from t1;
count(*)
-36
+7
select count(*) from t2;
count(*)
-1
+5
select count(*) from t3;
count(*)
-2
+1
select count(*) from t4;
count(*)
29
select count(*) from t5;
count(*)
58
+select count(*) from t11;
+count(*)
+8
+select count(*) from t20;
+count(*)
+66
+select count(*) from t21;
+count(*)
+19
+select count(*) from t22;
+count(*)
+2
+select count(*) from t12;
+count(*)
+4
+select count(*) from t13;
+count(*)
+1
+select count(*) from t14;
+count(*)
+4
+select count(*) from t16;
+count(*)
+3
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # drop database if exists mysqltest1
-master-bin.000001 # Table_map 1 # table_id: # (mysql.proc)
-master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
master-bin.000001 # Query 1 # create database mysqltest1
master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE t1 (a varchar(100))
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
@@ -178,78 +377,71 @@ master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
-master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
-master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work")
-master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_8_")
+master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci
master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string'
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work")
-master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_9_")
+master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F375F COLLATE latin1_swedish_ci
master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string'
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for")
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday"
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work")
-master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_10_")
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_11_"
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_13_")
+master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci
master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string'
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work")
-master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_14_")
+master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31325F COLLATE latin1_swedish_ci
master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string'
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for")
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday"
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work")
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("for_15_")
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_16_"
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work_18_")
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci
+master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci
master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string'
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday"
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_21_"
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E6379 COLLATE latin1_swedish_ci
+master-bin.000001 # User var 1 # @`string`=_latin1 0x656D657267656E63795F31375F COLLATE latin1_swedish_ci
master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select @'string'
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday"
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_24_"
master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t2` (
- `UUID()` varchar(36) CHARACTER SET utf8 NOT NULL DEFAULT ''
+ `rpad(UUID(),100,' ')` varchar(100) CHARACTER SET utf8 NOT NULL DEFAULT ''
)
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `mysqltest1`; COMMIT
master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t3` (
`1` varbinary(108) NOT NULL DEFAULT ''
)
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `mysqltest1`; COMMIT
master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t4` (
`a` varchar(100) DEFAULT NULL
)
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t4)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `mysqltest1`; COMMIT
master-bin.000001 # Query 1 # use `mysqltest1`; create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3)
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t5)
master-bin.000001 # Write_rows 1 # table_id: #
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo()
begin
-insert into t1 values("work");
-insert into t1 values(concat("for",UUID()));
-insert into t1 select "yesterday";
+insert into t1 values("work_25_");
+insert into t1 values(concat("for_26_",UUID()));
+insert into t1 select "yesterday_27_";
end
master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo2()
begin
-insert into t1 values(concat("emergency",UUID()));
-insert into t1 values("work");
-insert into t1 values(concat("for",UUID()));
+insert into t1 values(concat("emergency_28_",UUID()));
+insert into t1 values("work_29_");
+insert into t1 values(concat("for_30_",UUID()));
set session binlog_format=row; # accepted for stored procs
-insert into t1 values("more work");
+insert into t1 values("more work_31_");
set session binlog_format=mixed;
end
master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned
@@ -258,15 +450,213 @@ set session binlog_format=row; # rejected for stored funcs
insert into t1 values("alarm");
return 100;
end
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work")
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` procedure foo4(x varchar(100))
+begin
+insert into t1 values(concat("work_250_",x));
+insert into t1 select "yesterday_270_";
+end
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday"
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'hello')))
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_"
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values("work")
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(concat("work_250_", NAME_CONST('x',_latin1'world')))
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 select "yesterday_270_"
+master-bin.000001 # Query 1 # use `mysqltest1`; drop function foo3
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo3() returns bigint unsigned
+begin
+insert into t1 values("foo3_32_");
+call foo();
+return 100;
+end
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo4() returns bigint unsigned
+begin
+insert into t2 select foo3();
+return 100;
+end
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo5() returns bigint unsigned
+begin
+insert into t2 select UUID();
+return 100;
+end
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function foo6(x varchar(100)) returns bigint unsigned
+begin
+insert into t2 select x;
+return 100;
+end
+master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `foo6`(_latin1'foo6_1_')
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select uuid()
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t11 (data varchar(255))
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t11_bi before insert on t11 for each row
+begin
+set NEW.data = concat(NEW.data,UUID());
+end
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t11)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t20 select * from t1
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t21 select * from t2
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t22 select * from t3
+master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1,t2,t3
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100))
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t2 (a int primary key auto_increment, b varchar(100))
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t3 (b varchar(100))
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f (x varchar(100)) returns int deterministic
+begin
+insert into t1 values(null,x);
+insert into t2 values(null,x);
+return 1;
+end
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Intvar 1 # INSERT_ID=3
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_44_")
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t12 select * from t1
+master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int, b varchar(100), key(a))
+master-bin.000001 # Intvar 1 # INSERT_ID=4
+master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `f`(_latin1'try_45_')
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t13 select * from t1
+master-bin.000001 # Query 1 # use `mysqltest1`; drop table t1
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t1 (a int primary key auto_increment, b varchar(100))
+master-bin.000001 # Query 1 # use `mysqltest1`; drop function f
+master-bin.000001 # Query 1 # use `mysqltest1`; create table t14 (unique (a)) select * from t2
+master-bin.000001 # Query 1 # use `mysqltest1`; truncate table t2
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f1 (x varchar(100)) returns int deterministic
+begin
+insert into t1 values(null,x);
+return 1;
+end
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic
+begin
+insert into t2 values(null,x);
+return 1;
+end
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t3)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` function f2 (x varchar(100)) returns int deterministic
+begin
+declare y int;
+insert into t1 values(null,x);
+set y = (select count(*) from t2);
+return y;
+end
+master-bin.000001 # Intvar 1 # INSERT_ID=4
+master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `f1`(_latin1'try_53_')
+master-bin.000001 # Intvar 1 # INSERT_ID=5
+master-bin.000001 # Query 1 # use `mysqltest1`; SELECT `f2`(_latin1'try_54_')
+master-bin.000001 # Query 1 # use `mysqltest1`; drop function f2
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger t1_bi before insert on t1 for each row
+begin
+insert into t2 values(null,"try_55_");
+end
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t1)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t2)
+master-bin.000001 # Write_rows 1 # table_id: #
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; alter table t1 modify a int, drop primary key
+master-bin.000001 # Intvar 1 # INSERT_ID=5
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t1 values(null,"try_57_")
+master-bin.000001 # Query 1 # use `mysqltest1`; CREATE TABLE `t16` (
+ `UUID()` varchar(36) CHARACTER SET utf8 NOT NULL DEFAULT ''
+)
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16)
+master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Table_map 1 # table_id: # (mysqltest1.t16)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
+master-bin.000001 # Query 1 # use `mysqltest1`; insert into t16 values("try_66_")
drop database mysqltest1;
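A minimal sketch of the restriction exercised in the result above (outside the diff; assumes a server started with --log-bin and a session allowed to change binlog_format): the session format cannot be switched away from ROW while the session holds open temporary tables, which produces the "Cannot switch out of the row-based binary log format when the session has open temporary tables" error recorded above.

SET SESSION binlog_format = ROW;
CREATE TEMPORARY TABLE tmp_demo (a INT);
SET SESSION binlog_format = STATEMENT;   -- rejected while tmp_demo is open
DROP TEMPORARY TABLE tmp_demo;
SET SESSION binlog_format = STATEMENT;   -- accepted once the temporary table is gone
SET SESSION binlog_format = DEFAULT;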
diff --git a/mysql-test/r/rpl_temporary.result b/mysql-test/r/rpl_temporary.result
index 01882c683a4..7e7d0cebe1d 100644
--- a/mysql-test/r/rpl_temporary.result
+++ b/mysql-test/r/rpl_temporary.result
@@ -76,16 +76,11 @@ drop table t1,t2;
create temporary table t3 (f int);
create temporary table t4 (f int);
create table t5 (f int);
-drop table if exists t999;
-create temporary table t999 (f int);
-LOAD DATA INFILE "./tmp/bl_dump_thread_id" into table t999;
-drop table t999;
-insert into t4 values (1);
-kill `select id from information_schema.processlist where command='Binlog Dump'`;
+select id from information_schema.processlist where command='Binlog Dump' into @id;
+kill @id;
insert into t5 select * from t4;
select * from t5 /* must be 1 after reconnection */;
f
-1
drop temporary table t4;
drop table t5;
set @@session.pseudo_thread_id=100;
@@ -93,6 +88,7 @@ create temporary table t101 (id int);
create temporary table t102 (id int);
set @@session.pseudo_thread_id=200;
create temporary table t201 (id int);
+create temporary table `t``201` (id int);
create temporary table `#sql_not_user_table202` (id int);
set @@session.pseudo_thread_id=300;
create temporary table t301 (id int);
diff --git a/mysql-test/r/rpl_variables.result b/mysql-test/r/rpl_variables.result
index 25b5ca13f77..bbfe8373a46 100644
--- a/mysql-test/r/rpl_variables.result
+++ b/mysql-test/r/rpl_variables.result
@@ -15,3 +15,4 @@ slave_load_tmpdir SLAVE_LOAD_TMPDIR
show variables like 'slave_skip_errors';
Variable_name Value
slave_skip_errors 3,100,137,643,1752
+set global slave_net_timeout=default;
diff --git a/mysql-test/r/select.result b/mysql-test/r/select.result
index 04aaa10e98e..f48ae16505b 100644
--- a/mysql-test/r/select.result
+++ b/mysql-test/r/select.result
@@ -2730,6 +2730,81 @@ ERROR 42000: Key 'a' doesn't exist in table 't1'
EXPLAIN SELECT * FROM t1 FORCE INDEX (a);
ERROR 42000: Key 'a' doesn't exist in table 't1'
DROP TABLE t1;
+CREATE TABLE t1 (a int, b int);
+INSERT INTO t1 VALUES (1,1), (2,1), (4,10);
+CREATE TABLE t2 (a int PRIMARY KEY, b int, KEY b (b));
+INSERT INTO t2 VALUES (1,NULL), (2,10);
+ALTER TABLE t1 ENABLE KEYS;
+EXPLAIN SELECT STRAIGHT_JOIN SQL_NO_CACHE COUNT(*) FROM t2, t1 WHERE t1.b = t2.b OR t2.b IS NULL;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index b b 5 NULL 2 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
+SELECT STRAIGHT_JOIN SQL_NO_CACHE * FROM t2, t1 WHERE t1.b = t2.b OR t2.b IS NULL;
+a b a b
+1 NULL 1 1
+1 NULL 2 1
+1 NULL 4 10
+2 10 4 10
+EXPLAIN SELECT STRAIGHT_JOIN SQL_NO_CACHE COUNT(*) FROM t2, t1 WHERE t1.b = t2.b OR t2.b IS NULL;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index b b 5 NULL 2 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
+SELECT STRAIGHT_JOIN SQL_NO_CACHE * FROM t2, t1 WHERE t1.b = t2.b OR t2.b IS NULL;
+a b a b
+1 NULL 1 1
+1 NULL 2 1
+1 NULL 4 10
+2 10 4 10
+DROP TABLE IF EXISTS t1,t2;
+CREATE TABLE t1 (key1 float default NULL, UNIQUE KEY key1 (key1));
+CREATE TABLE t2 (key2 float default NULL, UNIQUE KEY key2 (key2));
+INSERT INTO t1 VALUES (0.3762),(0.3845),(0.6158),(0.7941);
+INSERT INTO t2 VALUES (1.3762),(1.3845),(1.6158),(1.7941);
+explain select max(key1) from t1 where key1 <= 0.6158;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+explain select max(key2) from t2 where key2 <= 1.6158;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+explain select min(key1) from t1 where key1 >= 0.3762;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+explain select min(key2) from t2 where key2 >= 1.3762;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+explain select max(key1), min(key2) from t1, t2
+where key1 <= 0.6158 and key2 >= 1.3762;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+explain select max(key1) from t1 where key1 <= 0.6158 and rand() + 0.5 >= 0.5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+explain select min(key1) from t1 where key1 >= 0.3762 and rand() + 0.5 >= 0.5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+select max(key1) from t1 where key1 <= 0.6158;
+max(key1)
+0.61580002307892
+select max(key2) from t2 where key2 <= 1.6158;
+max(key2)
+1.6158000230789
+select min(key1) from t1 where key1 >= 0.3762;
+min(key1)
+0.37619999051094
+select min(key2) from t2 where key2 >= 1.3762;
+min(key2)
+1.3761999607086
+select max(key1), min(key2) from t1, t2
+where key1 <= 0.6158 and key2 >= 1.3762;
+max(key1) min(key2)
+0.61580002307892 1.3761999607086
+select max(key1) from t1 where key1 <= 0.6158 and rand() + 0.5 >= 0.5;
+max(key1)
+0.61580002307892
+select min(key1) from t1 where key1 >= 0.3762 and rand() + 0.5 >= 0.5;
+min(key1)
+0.37619999051094
+DROP TABLE t1,t2;
CREATE TABLE t1 (i BIGINT UNSIGNED NOT NULL);
INSERT INTO t1 VALUES (10);
SELECT i='1e+01',i=1e+01, i in (1e+01,1e+01), i in ('1e+01','1e+01') FROM t1;
@@ -3395,3 +3470,12 @@ a t1.b + 0 t1.c + 0 a t2.b + 0 c d
1 0 1 1 0 1 NULL
2 0 1 NULL NULL NULL NULL
drop table t1,t2;
+SELECT 0.9888889889 * 1.011111411911;
+0.9888889889 * 1.011111411911
+0.9998769417899202067879
+prepare stmt from 'select 1 as " a "';
+Warnings:
+Warning 1546 Leading spaces are removed from name ' a '
+execute stmt;
+a
+1
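A small sketch of the optimization the new EXPLAIN lines assert (not from the diff; k1 is a hypothetical table reusing the same values): a MIN()/MAX() bounded by a range over a unique index can be resolved from the index alone, so EXPLAIN reports "Select tables optimized away".

CREATE TABLE k1 (key1 FLOAT DEFAULT NULL, UNIQUE KEY key1 (key1));
INSERT INTO k1 VALUES (0.3762),(0.3845),(0.6158),(0.7941);
EXPLAIN SELECT MAX(key1) FROM k1 WHERE key1 <= 0.6158;
-- Extra: Select tables optimized away
SELECT MAX(key1) FROM k1 WHERE key1 <= 0.6158;   -- 0.6158, modulo float rounding
DROP TABLE k1;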
diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result
index 7237cf11fc0..f5b3fc99baf 100644
--- a/mysql-test/r/show_check.result
+++ b/mysql-test/r/show_check.result
@@ -607,6 +607,68 @@ DROP TABLE tyt2;
DROP TABLE urkunde;
SHOW TABLES FROM non_existing_database;
ERROR 42000: Unknown database 'non_existing_database'
+End of 4.1 tests
+DROP VIEW IF EXISTS v1;
+DROP PROCEDURE IF EXISTS p1;
+CREATE VIEW v1 AS SELECT 1;
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 AS `1`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_CACHE 1;
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache 1 AS `1`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE 1;
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache 1 AS `1`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()`
+DROP VIEW v1;
+CREATE PROCEDURE p1()
+BEGIN
+SET @s= 'CREATE VIEW v1 AS SELECT SQL_CACHE 1';
+PREPARE stmt FROM @s;
+EXECUTE stmt;
+DROP PREPARE stmt;
+END |
+CALL p1();
+SHOW CREATE VIEW v1;
+View Create View
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache 1 AS `1`
+DROP PROCEDURE p1;
+DROP VIEW v1;
+End of 5.0 tests.
SHOW AUTHORS;
create database mysqltest;
show create database mysqltest;
diff --git a/mysql-test/r/sp-error.result b/mysql-test/r/sp-error.result
index 29206393b04..bf36b4796b9 100644
--- a/mysql-test/r/sp-error.result
+++ b/mysql-test/r/sp-error.result
@@ -754,7 +754,7 @@ bug11834_2()
10
drop function bug11834_1;
execute stmt;
-ERROR 42000: FUNCTION test.bug11834_1 does not exist
+ERROR 42000: FUNCTION test.bug11834_2 does not exist
deallocate prepare stmt;
drop function bug11834_2;
DROP FUNCTION IF EXISTS bug12953|
diff --git a/mysql-test/r/sp-security.result b/mysql-test/r/sp-security.result
index af4f1c16c56..4860058a8a1 100644
--- a/mysql-test/r/sp-security.result
+++ b/mysql-test/r/sp-security.result
@@ -314,16 +314,6 @@ select * from db_bug14533.t1;
ERROR 42000: SELECT command denied to user 'user_bug14533'@'localhost' for table 't1'
drop user user_bug14533@localhost;
drop database db_bug14533;
-CREATE DATABASE db_bug7787;
-use db_bug7787;
-CREATE PROCEDURE p1()
-SHOW INNODB STATUS;
-Warnings:
-Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead.
-GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost;
-DROP DATABASE db_bug7787;
-drop user user_bug7787@localhost;
-use test;
---> connection: root
DROP DATABASE IF EXISTS mysqltest;
@@ -420,3 +410,34 @@ ERROR HY000: There is no 'mysqltest_1'@'localhost' registered
---> connection: root
DROP USER mysqltest_2@localhost;
DROP DATABASE mysqltest;
+GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow';
+GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO
+user19857@localhost;
+SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+Host User Password
+localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C
+
+---> connection: mysqltest_2_con
+use test;
+CREATE PROCEDURE sp19857() DETERMINISTIC
+BEGIN
+DECLARE a INT;
+SET a=1;
+SELECT a;
+END //
+SHOW CREATE PROCEDURE test.sp19857;
+Procedure sql_mode Create Procedure
+sp19857 CREATE DEFINER=`user19857`@`localhost` PROCEDURE `sp19857`()
+ DETERMINISTIC
+BEGIN
+DECLARE a INT;
+SET a=1;
+SELECT a;
+END
+DROP PROCEDURE IF EXISTS test.sp19857;
+
+---> connection: root
+SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+Host User Password
+localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C
+DROP USER user19857@localhost;
diff --git a/mysql-test/r/sp-vars.result b/mysql-test/r/sp-vars.result
index f620cd657f0..6335870caa9 100644
--- a/mysql-test/r/sp-vars.result
+++ b/mysql-test/r/sp-vars.result
@@ -1075,3 +1075,18 @@ SELECT f1();
f1()
abc
DROP FUNCTION f1;
+DROP PROCEDURE IF EXISTS p1;
+CREATE PROCEDURE p1()
+BEGIN
+DECLARE v_char VARCHAR(255);
+DECLARE v_text TEXT DEFAULT '';
+SET v_char = 'abc';
+SET v_text = v_char;
+SET v_char = 'def';
+SET v_text = concat(v_text, '|', v_char);
+SELECT v_text;
+END|
+CALL p1();
+v_text
+abc|def
+DROP PROCEDURE p1;
diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result
index db4b377880c..d9c4577e2b2 100644
--- a/mysql-test/r/sp.result
+++ b/mysql-test/r/sp.result
@@ -4796,22 +4796,6 @@ i
0
drop table t3|
drop procedure bug16887|
-create table t3 (f1 int, f2 varchar(3), primary key(f1)) engine=innodb|
-insert into t3 values (1,'aaa'),(2,'bbb'),(3,'ccc')|
-CREATE FUNCTION bug13575 ( p1 integer )
-returns varchar(3)
-BEGIN
-DECLARE v1 VARCHAR(10) DEFAULT null;
-SELECT f2 INTO v1 FROM t3 WHERE f1 = p1;
-RETURN v1;
-END|
-select distinct f1, bug13575(f1) from t3 order by f1|
-f1 bug13575(f1)
-1 aaa
-2 bbb
-3 ccc
-drop function bug13575;
-drop table t3|
drop procedure if exists bug16474_1|
drop procedure if exists bug16474_2|
delete from t1|
@@ -4931,7 +4915,7 @@ create table t3 as select * from v1|
show create table t3|
Table Create Table
t3 CREATE TABLE `t3` (
- `j` bigint(11) DEFAULT NULL
+ `j` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
select * from t3|
j
@@ -5000,6 +4984,52 @@ CALL bug18037_p2()|
DROP FUNCTION bug18037_f1|
DROP PROCEDURE bug18037_p1|
DROP PROCEDURE bug18037_p2|
+use test|
+create table t3 (i int)|
+insert into t3 values (1), (2)|
+create database mysqltest1|
+use mysqltest1|
+create function bug17199() returns varchar(2) deterministic return 'ok'|
+use test|
+select *, mysqltest1.bug17199() from t3|
+i mysqltest1.bug17199()
+1 ok
+2 ok
+use mysqltest1|
+create function bug18444(i int) returns int no sql deterministic return i + 1|
+use test|
+select mysqltest1.bug18444(i) from t3|
+mysqltest1.bug18444(i)
+2
+3
+drop database mysqltest1|
+create database mysqltest1 charset=utf8|
+create database mysqltest2 charset=utf8|
+create procedure mysqltest1.p1()
+begin
+-- alters the default collation of database test
+alter database character set koi8r;
+end|
+use mysqltest1|
+call p1()|
+show create database mysqltest1|
+Database Create Database
+mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */
+show create database mysqltest2|
+Database Create Database
+mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */
+alter database mysqltest1 character set utf8|
+use mysqltest2|
+call mysqltest1.p1()|
+show create database mysqltest1|
+Database Create Database
+mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */
+show create database mysqltest2|
+Database Create Database
+mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */
+drop database mysqltest1|
+drop database mysqltest2|
+use test|
drop table if exists t3|
drop procedure if exists bug15217|
create table t3 as select 1|
@@ -5021,4 +5051,23 @@ concat('data was: /', var1, '/')
data was: /1/
drop table t3|
drop procedure bug15217|
+drop procedure if exists bug19862|
+CREATE TABLE t11 (a INT)|
+CREATE TABLE t12 (a INT)|
+CREATE FUNCTION bug19862(x INT) RETURNS INT
+BEGIN
+INSERT INTO t11 VALUES (x);
+RETURN x+1;
+END|
+INSERT INTO t12 VALUES (1), (2)|
+SELECT bug19862(a) FROM t12 ORDER BY 1|
+bug19862(a)
+2
+3
+SELECT * FROM t11|
+a
+1
+2
+DROP TABLE t11, t12|
+DROP FUNCTION bug19862|
drop table t1,t2;
diff --git a/mysql-test/r/sp_notembedded.result b/mysql-test/r/sp_notembedded.result
index c5d60446e0a..a15f5013ef6 100644
--- a/mysql-test/r/sp_notembedded.result
+++ b/mysql-test/r/sp_notembedded.result
@@ -25,17 +25,6 @@ Id User Host db Command Time State Info
# event_scheduler localhost NULL Connect # Suspended NULL
# root localhost test Query # NULL show processlist
drop procedure bug4902_2|
-drop function if exists bug5278|
-create function bug5278 () returns char
-begin
-SET PASSWORD FOR 'bob'@'%.loc.gov' = PASSWORD('newpass');
-return 'okay';
-end|
-select bug5278()|
-ERROR 42000: Can't find any matching row in the user table
-select bug5278()|
-ERROR 42000: Can't find any matching row in the user table
-drop function bug5278|
drop table if exists t1|
create table t1 (
id char(16) not null default '',
@@ -208,3 +197,17 @@ drop procedure bug10100pd|
drop procedure bug10100pc|
drop view v1|
drop table t3|
+drop procedure if exists bug15298_1;
+drop procedure if exists bug15298_2;
+grant all privileges on test.* to 'mysqltest_1'@'localhost';
+create procedure 15298_1 () sql security definer show grants for current_user;
+create procedure 15298_2 () sql security definer show grants;
+call 15298_1();
+Grants for root@localhost
+GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION
+call 15298_2();
+Grants for root@localhost
+GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION
+drop user mysqltest_1@localhost;
+drop procedure 15298_1;
+drop procedure 15298_2;
diff --git a/mysql-test/r/sp_trans.result b/mysql-test/r/sp_trans.result
index 4c17226a9b0..a5012673c12 100644
--- a/mysql-test/r/sp_trans.result
+++ b/mysql-test/r/sp_trans.result
@@ -530,3 +530,29 @@ count(*)
drop table t3, t4|
drop procedure bug14210|
set @@session.max_heap_table_size=default|
+CREATE DATABASE db_bug7787|
+use db_bug7787|
+CREATE PROCEDURE p1()
+SHOW INNODB STATUS; |
+Warnings:
+Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead.
+GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost|
+DROP DATABASE db_bug7787|
+drop user user_bug7787@localhost|
+use test|
+create table t3 (f1 int, f2 varchar(3), primary key(f1)) engine=innodb|
+insert into t3 values (1,'aaa'),(2,'bbb'),(3,'ccc')|
+CREATE FUNCTION bug13575 ( p1 integer )
+returns varchar(3)
+BEGIN
+DECLARE v1 VARCHAR(10) DEFAULT null;
+SELECT f2 INTO v1 FROM t3 WHERE f1 = p1;
+RETURN v1;
+END|
+select distinct f1, bug13575(f1) from t3 order by f1|
+f1 bug13575(f1)
+1 aaa
+2 bbb
+3 ccc
+drop function bug13575|
+drop table t3|
diff --git a/mysql-test/r/strict.result b/mysql-test/r/strict.result
index 5ca185d6abc..dd96dc6d983 100644
--- a/mysql-test/r/strict.result
+++ b/mysql-test/r/strict.result
@@ -1298,3 +1298,49 @@ t2 CREATE TABLE `t2` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t2,t1;
set @@sql_mode= @org_mode;
+set @@sql_mode='traditional';
+create table t1 (i int)
+comment '123456789*123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*123456789*';
+ERROR HY000: Too long comment for table 't1'
+create table t1 (
+i int comment
+'123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*');
+ERROR HY000: Too long comment for field 'i'
+set @@sql_mode= @org_mode;
+create table t1
+(i int comment
+'123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*');
+Warnings:
+Warning 1105 Unknown error
+select column_name, column_comment from information_schema.columns where
+table_schema = 'test' and table_name = 't1';
+column_name column_comment
+i 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+drop table t1;
+set names utf8;
+create table t1 (i int)
+comment '123456789*123456789*123456789*123456789*123456789*123456789*';
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='123456789*123456789*123456789*123456789*123456789*123456789*'
+drop table t1;
diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result
index adad9391baa..bbad23446c6 100644
--- a/mysql-test/r/subselect.result
+++ b/mysql-test/r/subselect.result
@@ -1019,19 +1019,19 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 0.00 const row not found
2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 0.00 const row not found
Warnings:
-Note 1003 select sql_no_cache (select sql_no_cache rand() AS `RAND()` from `test`.`t1`) AS `(SELECT RAND() FROM t1)` from `test`.`t1`
+Note 1003 select (select rand() AS `RAND()` from `test`.`t1`) AS `(SELECT RAND() FROM t1)` from `test`.`t1`
EXPLAIN EXTENDED SELECT (SELECT ENCRYPT('test') FROM t1) FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 0.00 const row not found
2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 0.00 const row not found
Warnings:
-Note 1003 select sql_no_cache (select sql_no_cache encrypt(_latin1'test') AS `ENCRYPT('test')` from `test`.`t1`) AS `(SELECT ENCRYPT('test') FROM t1)` from `test`.`t1`
+Note 1003 select (select encrypt(_latin1'test') AS `ENCRYPT('test')` from `test`.`t1`) AS `(SELECT ENCRYPT('test') FROM t1)` from `test`.`t1`
EXPLAIN EXTENDED SELECT (SELECT BENCHMARK(1,1) FROM t1) FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 0.00 const row not found
2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 0.00 const row not found
Warnings:
-Note 1003 select sql_no_cache (select sql_no_cache benchmark(1,1) AS `BENCHMARK(1,1)` from `test`.`t1`) AS `(SELECT BENCHMARK(1,1) FROM t1)` from `test`.`t1`
+Note 1003 select (select benchmark(1,1) AS `BENCHMARK(1,1)` from `test`.`t1`) AS `(SELECT BENCHMARK(1,1) FROM t1)` from `test`.`t1`
drop table t1;
CREATE TABLE `t1` (
`mot` varchar(30) character set latin1 NOT NULL default '',
@@ -1087,24 +1087,24 @@ CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT 1)) a;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` bigint(1) NOT NULL DEFAULT '0',
- `(SELECT 1)` bigint(1) NOT NULL DEFAULT '0'
+ `a` int(1) NOT NULL DEFAULT '0',
+ `(SELECT 1)` int(1) NOT NULL DEFAULT '0'
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a)) a;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` bigint(1) NOT NULL DEFAULT '0',
- `(SELECT a)` bigint(1) NOT NULL DEFAULT '0'
+ `a` int(1) NOT NULL DEFAULT '0',
+ `(SELECT a)` int(1) NOT NULL DEFAULT '0'
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a+0)) a;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` bigint(1) NOT NULL DEFAULT '0',
- `(SELECT a+0)` bigint(3) NOT NULL DEFAULT '0'
+ `a` int(1) NOT NULL DEFAULT '0',
+ `(SELECT a+0)` int(3) NOT NULL DEFAULT '0'
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
CREATE TABLE t1 SELECT (SELECT 1 as a UNION SELECT 1+1 limit 1,1) as a;
@@ -1126,7 +1126,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 3 100.00
3 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 3 100.00
Warnings:
-Note 1003 select sql_no_cache `test`.`t1`.`a` AS `a`,(select sql_no_cache (select sql_no_cache rand() AS `rand()` from `test`.`t1` limit 1) AS `(select rand() from t1 limit 1)` from `test`.`t1` limit 1) AS `(select (select rand() from t1 limit 1) from t1 limit 1)` from `test`.`t1`
+Note 1003 select `test`.`t1`.`a` AS `a`,(select (select rand() AS `rand()` from `test`.`t1` limit 1) AS `(select rand() from t1 limit 1)` from `test`.`t1` limit 1) AS `(select (select rand() from t1 limit 1) from t1 limit 1)` from `test`.`t1`
drop table t1;
select t1.Continent, t2.Name, t2.Population from t1 LEFT JOIN t2 ON t1.Code = t2.Country where t2.Population IN (select max(t2.Population) AS Population from t2, t1 where t2.Country = t1.Code group by Continent);
ERROR 42S02: Table 'test.t1' doesn't exist
@@ -2854,6 +2854,67 @@ a
3
4
DROP TABLE t1,t2,t3;
+purge master logs before (select adddate(current_timestamp(), interval -4 day));
+CREATE TABLE t1 (f1 INT);
+CREATE TABLE t2 (f2 INT);
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 WHERE f1 > ALL (SELECT f2 FROM t2);
+f1
+1
+SELECT * FROM t1 WHERE f1 > ALL (SELECT f2 FROM t2 WHERE 1=0);
+f1
+1
+INSERT INTO t2 VALUES (1);
+INSERT INTO t2 VALUES (2);
+SELECT * FROM t1 WHERE f1 > ALL (SELECT f2 FROM t2 WHERE f2=0);
+f1
+1
+DROP TABLE t1, t2;
+select 1 from dual where 1 < any (select 2);
+1
+1
+select 1 from dual where 1 < all (select 2);
+1
+1
+select 1 from dual where 2 > any (select 1);
+1
+1
+select 1 from dual where 2 > all (select 1);
+1
+1
+select 1 from dual where 1 < any (select 2 from dual);
+1
+1
+select 1 from dual where 1 < all (select 2 from dual where 1!=1);
+1
+1
+create table t1 (s1 char);
+insert into t1 values (1),(2);
+select * from t1 where (s1 < any (select s1 from t1));
+s1
+1
+select * from t1 where not (s1 < any (select s1 from t1));
+s1
+2
+select * from t1 where (s1 < ALL (select s1+1 from t1));
+s1
+1
+select * from t1 where not(s1 < ALL (select s1+1 from t1));
+s1
+2
+select * from t1 where (s1+1 = ANY (select s1 from t1));
+s1
+1
+select * from t1 where NOT(s1+1 = ANY (select s1 from t1));
+s1
+2
+select * from t1 where (s1 = ALL (select s1/s1 from t1));
+s1
+1
+select * from t1 where NOT(s1 = ALL (select s1/s1 from t1));
+s1
+2
+drop table t1;
create table t1 (df decimal(5,1));
insert into t1 values(1.1);
insert into t1 values(2.2);
@@ -3204,3 +3265,84 @@ i
10000000000000000000
DROP TABLE t1;
DROP TABLE t2;
+CREATE TABLE t1 (
+id bigint(20) unsigned NOT NULL auto_increment,
+name varchar(255) NOT NULL,
+PRIMARY KEY (id)
+);
+INSERT INTO t1 VALUES
+(1, 'Balazs'), (2, 'Joe'), (3, 'Frank');
+CREATE TABLE t2 (
+id bigint(20) unsigned NOT NULL auto_increment,
+mid bigint(20) unsigned NOT NULL,
+date date NOT NULL,
+PRIMARY KEY (id)
+);
+INSERT INTO t2 VALUES
+(1, 1, '2006-03-30'), (2, 2, '2006-04-06'), (3, 3, '2006-04-13'),
+(4, 2, '2006-04-20'), (5, 1, '2006-05-01');
+SELECT *,
+(SELECT date FROM t2 WHERE mid = t1.id
+ORDER BY date DESC LIMIT 0, 1) AS date_last,
+(SELECT date FROM t2 WHERE mid = t1.id
+ORDER BY date DESC LIMIT 3, 1) AS date_next_to_last
+FROM t1;
+id name date_last date_next_to_last
+1 Balazs 2006-05-01 NULL
+2 Joe 2006-04-20 NULL
+3 Frank 2006-04-13 NULL
+SELECT *,
+(SELECT COUNT(*) FROM t2 WHERE mid = t1.id
+ORDER BY date DESC LIMIT 1, 1) AS date_count
+FROM t1;
+id name date_count
+1 Balazs NULL
+2 Joe NULL
+3 Frank NULL
+SELECT *,
+(SELECT date FROM t2 WHERE mid = t1.id
+ORDER BY date DESC LIMIT 0, 1) AS date_last,
+(SELECT date FROM t2 WHERE mid = t1.id
+ORDER BY date DESC LIMIT 1, 1) AS date_next_to_last
+FROM t1;
+id name date_last date_next_to_last
+1 Balazs 2006-05-01 2006-03-30
+2 Joe 2006-04-20 2006-04-06
+3 Frank 2006-04-13 NULL
+DROP TABLE t1,t2;
+CREATE TABLE t1 (
+i1 int(11) NOT NULL default '0',
+i2 int(11) NOT NULL default '0',
+t datetime NOT NULL default '0000-00-00 00:00:00',
+PRIMARY KEY (i1,i2,t)
+);
+INSERT INTO t1 VALUES
+(24,1,'2005-03-03 16:31:31'),(24,1,'2005-05-27 12:40:07'),
+(24,1,'2005-05-27 12:40:08'),(24,1,'2005-05-27 12:40:10'),
+(24,1,'2005-05-27 12:40:25'),(24,1,'2005-05-27 12:40:30'),
+(24,2,'2005-03-03 13:43:05'),(24,2,'2005-03-03 16:23:31'),
+(24,2,'2005-03-03 16:31:30'),(24,2,'2005-05-27 12:37:02'),
+(24,2,'2005-05-27 12:40:06');
+CREATE TABLE t2 (
+i1 int(11) NOT NULL default '0',
+i2 int(11) NOT NULL default '0',
+t datetime default NULL,
+PRIMARY KEY (i1)
+);
+INSERT INTO t2 VALUES (24,1,'2006-06-20 12:29:40');
+EXPLAIN
+SELECT * FROM t1,t2
+WHERE t1.t = (SELECT t1.t FROM t1
+WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
+ORDER BY t1.t DESC LIMIT 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 system NULL NULL NULL NULL 1
+1 PRIMARY t1 index NULL PRIMARY 16 NULL 11 Using where; Using index
+2 DEPENDENT SUBQUERY t1 range PRIMARY PRIMARY 16 NULL 5 Using where; Using index
+SELECT * FROM t1,t2
+WHERE t1.t = (SELECT t1.t FROM t1
+WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
+ORDER BY t1.t DESC LIMIT 1);
+i1 i2 t i1 i2 t
+24 1 2005-05-27 12:40:30 24 1 2006-06-20 12:29:40
+DROP TABLE t1, t2;
diff --git a/mysql-test/r/subselect2.result b/mysql-test/r/subselect2.result
index 026bcb4b370..75aa339fb29 100644
--- a/mysql-test/r/subselect2.result
+++ b/mysql-test/r/subselect2.result
@@ -132,3 +132,15 @@ id select_type table type possible_keys key key_len ref rows Extra
5 DEPENDENT SUBQUERY t3 unique_subquery PRIMARY,FFOLDERID_IDX PRIMARY 34 func 1 Using index; Using where
6 DEPENDENT SUBQUERY t3 unique_subquery PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 func 1 Using index; Using where
drop table t1, t2, t3, t4;
+CREATE TABLE t1 (a int(10) , PRIMARY KEY (a)) Engine=InnoDB;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (a int(10), PRIMARY KEY (a)) Engine=InnoDB;
+INSERT INTO t2 VALUES (1);
+CREATE TABLE t3 (a int(10), b int(10), c int(10),
+PRIMARY KEY (a)) Engine=InnoDB;
+INSERT INTO t3 VALUES (1,2,1);
+SELECT t1.* FROM t1 WHERE (SELECT COUNT(*) FROM t3,t2 WHERE t3.c=t2.a
+and t2.a='1' AND t1.a=t3.b) > 0;
+a
+2
+DROP TABLE t1,t2,t3;
diff --git a/mysql-test/r/symlink.result b/mysql-test/r/symlink.result
index 3b24210dd5d..efd6f8710aa 100644
--- a/mysql-test/r/symlink.result
+++ b/mysql-test/r/symlink.result
@@ -74,18 +74,24 @@ t9 CREATE TABLE `t9` (
) ENGINE=MyISAM AUTO_INCREMENT=16725 DEFAULT CHARSET=latin1 DATA DIRECTORY='MYSQLTEST_VARDIR/tmp/' INDEX DIRECTORY='MYSQLTEST_VARDIR/run/'
drop database mysqltest;
create table t1 (a int not null) engine=myisam;
+Warnings:
+Warning 0 DATA DIRECTORY option ignored
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
alter table t1 add b int;
+Warnings:
+Warning 0 DATA DIRECTORY option ignored
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
`b` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Warnings:
+Warning 0 INDEX DIRECTORY option ignored
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result
index 145579201a9..2f24d7f1d52 100644
--- a/mysql-test/r/trigger.result
+++ b/mysql-test/r/trigger.result
@@ -295,7 +295,7 @@ create trigger trg before insert on t1 for each row set @a:=1;
create trigger trg after insert on t1 for each row set @a:=1;
ERROR HY000: Trigger already exists
create trigger trg2 before insert on t1 for each row set @a:=1;
-ERROR HY000: Trigger already exists
+ERROR 42000: This version of MySQL doesn't yet support 'multiple triggers with the same action time and event for one table'
create trigger trg before insert on t3 for each row set @a:=1;
ERROR HY000: Trigger already exists
create trigger trg2 before insert on t3 for each row set @a:=1;
@@ -1078,3 +1078,15 @@ i1
43
51
DROP TABLE t1;
+create trigger wont_work after update on mysql.user for each row
+begin
+set @a:= 1;
+end|
+ERROR HY000: Triggers can not be created on system tables
+use mysql|
+create trigger wont_work after update on event for each row
+begin
+set @a:= 1;
+end|
+ERROR HY000: Triggers can not be created on system tables
+End of 5.0 tests
diff --git a/mysql-test/r/type_blob.result b/mysql-test/r/type_blob.result
index ff92ea14f97..d05e9403d11 100644
--- a/mysql-test/r/type_blob.result
+++ b/mysql-test/r/type_blob.result
@@ -517,7 +517,7 @@ coercibility(load_file('../../std_data/words.dat'));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache charset(load_file(_latin1'../../std_data/words.dat')) AS `charset(load_file('../../std_data/words.dat'))`,collation(load_file(_latin1'../../std_data/words.dat')) AS `collation(load_file('../../std_data/words.dat'))`,coercibility(load_file(_latin1'../../std_data/words.dat')) AS `coercibility(load_file('../../std_data/words.dat'))`
+Note 1003 select charset(load_file(_latin1'../../std_data/words.dat')) AS `charset(load_file('../../std_data/words.dat'))`,collation(load_file(_latin1'../../std_data/words.dat')) AS `collation(load_file('../../std_data/words.dat'))`,coercibility(load_file(_latin1'../../std_data/words.dat')) AS `coercibility(load_file('../../std_data/words.dat'))`
update t1 set imagem=load_file('../../std_data/words.dat') where id=1;
select if(imagem is null, "ERROR", "OK"),length(imagem) from t1 where id = 1;
if(imagem is null, "ERROR", "OK") length(imagem)
diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result
index b15683a0882..9755083ab1c 100644
--- a/mysql-test/r/type_newdecimal.result
+++ b/mysql-test/r/type_newdecimal.result
@@ -1397,6 +1397,16 @@ c1
9999999999999999999999999999999999999999999999999999999999999999
9999999999999999999999999999999999999999999999999999999999999999
drop table t1;
+create table t1(a decimal(7,2));
+insert into t1 values(123.12);
+select * from t1;
+a
+123.12
+alter table t1 modify a decimal(10,2);
+select * from t1;
+a
+123.12
+drop table t1;
create table t1 (i int, j int);
insert into t1 values (1,1), (1,2), (2,3), (2,4);
select i, count(distinct j) from t1 group by i;
diff --git a/mysql-test/r/type_ranges.result b/mysql-test/r/type_ranges.result
index df3298bad1a..32bb6abf7ac 100644
--- a/mysql-test/r/type_ranges.result
+++ b/mysql-test/r/type_ranges.result
@@ -273,7 +273,7 @@ create table t2 (primary key (auto)) select auto+1 as auto,1 as t1, 'a' as t2, r
show full columns from t2;
Field Type Collation Null Key Default Extra Privileges Comment
auto bigint(12) unsigned NULL NO PRI 0 #
-t1 bigint(1) NULL NO 0 #
+t1 int(1) NULL NO 0 #
t2 varchar(1) latin1_swedish_ci NO #
t3 varchar(256) latin1_swedish_ci NO #
t4 varbinary(256) NULL NO #
@@ -301,7 +301,7 @@ show full columns from t3;
Field Type Collation Null Key Default Extra Privileges Comment
c1 int(11) NULL YES NULL #
c2 int(11) NULL YES NULL #
-const bigint(1) NULL NO 0 #
+const int(1) NULL NO 0 #
drop table t1,t2,t3;
create table t1 ( myfield INT NOT NULL, UNIQUE INDEX (myfield), unique (myfield), index(myfield));
drop table t1;
diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result
index 6bec1e2b46b..b1c55c517ac 100644
--- a/mysql-test/r/type_timestamp.result
+++ b/mysql-test/r/type_timestamp.result
@@ -1,4 +1,5 @@
drop table if exists t1,t2;
+set time_zone="+03:00";
CREATE TABLE t1 (a int, t timestamp);
CREATE TABLE t2 (a int, t datetime);
SET TIMESTAMP=1234;
@@ -491,3 +492,4 @@ a b c
5 NULL 2001-09-09 04:46:59
6 NULL 2006-06-06 06:06:06
drop table t1;
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/r/udf.result b/mysql-test/r/udf.result
index 01aa8539262..624895f71c9 100644
--- a/mysql-test/r/udf.result
+++ b/mysql-test/r/udf.result
@@ -76,6 +76,30 @@ call XXX2();
metaphon(testval)
HL
drop procedure xxx2;
+CREATE TABLE bug19904(n INT, v varchar(10));
+INSERT INTO bug19904 VALUES (1,'one'),(2,'two'),(NULL,NULL),(3,'three'),(4,'four');
+SELECT myfunc_double(n) AS f FROM bug19904;
+f
+49.00
+50.00
+NULL
+51.00
+52.00
+SELECT metaphon(v) AS f FROM bug19904;
+f
+ON
+TW
+NULL
+0R
+FR
+DROP TABLE bug19904;
+create table t1(f1 int);
+insert into t1 values(1),(2);
+explain select myfunc_int(f1) from t1 order by 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort
+drop table t1;
+End of 5.0 tests.
DROP FUNCTION metaphon;
DROP FUNCTION myfunc_double;
DROP FUNCTION myfunc_nonexist;
diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result
index 255fc4d6205..b96f97b535c 100644
--- a/mysql-test/r/union.result
+++ b/mysql-test/r/union.result
@@ -691,9 +691,9 @@ t1 CREATE TABLE `t1` (
`da` datetime DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
-create table t1 SELECT dt from t2 UNION select sc from t2;
-select * from t1;
-dt
+create table t1 SELECT dt from t2 UNION select trim(sc) from t2;
+select trim(dt) from t1;
+trim(dt)
1972-10-22 11:50:00
testc
show create table t1;
@@ -732,7 +732,7 @@ tetetetetest
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `dt` longblob
+ `dt` blob
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 SELECT sv from t2 UNION select b from t2;
@@ -743,7 +743,7 @@ tetetetetest
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `sv` longblob
+ `sv` blob
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 SELECT i from t2 UNION select d from t2 UNION select b from t2;
@@ -755,7 +755,7 @@ tetetetetest
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `i` longblob
+ `i` blob
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 SELECT sv from t2 UNION select tx from t2;
@@ -766,7 +766,7 @@ teeeeeeeeeeeest
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `sv` longtext
+ `sv` text
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 SELECT b from t2 UNION select tx from t2;
@@ -777,7 +777,7 @@ teeeeeeeeeeeest
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `b` longblob
+ `b` blob
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1,t2;
create table t1 select 1 union select -1;
@@ -1306,6 +1306,21 @@ id
5
99
drop table t1;
+create table t1(f1 char(1), f2 char(5), f3 binary(1), f4 binary(5), f5 timestamp, f6 varchar(1) character set utf8 collate utf8_general_ci, f7 text);
+create table t2 as select *, f6 as f8 from t1 union select *, f7 from t1;
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `f1` char(1) DEFAULT NULL,
+ `f2` char(5) DEFAULT NULL,
+ `f3` binary(1) DEFAULT NULL,
+ `f4` binary(5) DEFAULT NULL,
+ `f5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `f6` varchar(1) CHARACTER SET utf8 DEFAULT NULL,
+ `f7` text,
+ `f8` text CHARACTER SET utf8
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1, t2;
(select avg(1)) union (select avg(1)) union (select avg(1)) union
(select avg(1)) union (select avg(1)) union (select avg(1)) union
(select avg(1)) union (select avg(1)) union (select avg(1)) union
diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result
index 6cc656951a6..3b6bfc60a80 100644
--- a/mysql-test/r/variables.result
+++ b/mysql-test/r/variables.result
@@ -75,7 +75,7 @@ explain extended select @t1:=(@t2:=1)+@t3:=4,@t1,@t2,@t3;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache (@t1:=((@t2:=1) + (@t3:=4))) AS `@t1:=(@t2:=1)+@t3:=4`,(@t1) AS `@t1`,(@t2) AS `@t2`,(@t3) AS `@t3`
+Note 1003 select (@t1:=((@t2:=1) + (@t3:=4))) AS `@t1:=(@t2:=1)+@t3:=4`,(@t1) AS `@t1`,(@t2) AS `@t2`,(@t3) AS `@t3`
select @t5;
@t5
1.23456
@@ -135,7 +135,7 @@ explain extended select last_insert_id(345);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache last_insert_id(345) AS `last_insert_id(345)`
+Note 1003 select last_insert_id(345) AS `last_insert_id(345)`
select @@IDENTITY,last_insert_id(), @@identity;
@@IDENTITY last_insert_id() @@identity
345 345 345
@@ -143,7 +143,7 @@ explain extended select @@IDENTITY,last_insert_id(), @@identity;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select sql_no_cache 345 AS `@@IDENTITY`,last_insert_id() AS `last_insert_id()`,345 AS `@@identity`
+Note 1003 select 345 AS `@@IDENTITY`,last_insert_id() AS `last_insert_id()`,345 AS `@@identity`
set big_tables=OFF, big_tables=ON, big_tables=0, big_tables=1, big_tables="OFF", big_tables="ON";
set global concurrent_insert=2;
show variables like 'concurrent_insert';
@@ -421,6 +421,28 @@ set tmp_table_size=100;
set tx_isolation="READ-COMMITTED";
set wait_timeout=100;
set log_warnings=1;
+select @@session.insert_id;
+@@session.insert_id
+1
+set @save_insert_id=@@session.insert_id;
+set session insert_id=20;
+select @@session.insert_id;
+@@session.insert_id
+20
+set session last_insert_id=100;
+select @@session.insert_id;
+@@session.insert_id
+20
+select @@session.last_insert_id;
+@@session.last_insert_id
+100
+select @@session.insert_id;
+@@session.insert_id
+20
+set @@session.insert_id=@save_insert_id;
+select @@session.insert_id;
+@@session.insert_id
+1
create table t1 (a int not null auto_increment, primary key(a));
create table t2 (a int not null auto_increment, primary key(a));
insert into t1 values(null),(null),(null);
diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result
index 6deba3f278a..a7d797773ce 100644
--- a/mysql-test/r/view.result
+++ b/mysql-test/r/view.result
@@ -672,7 +672,7 @@ drop table t1;
CREATE VIEW v1 (f1,f2,f3,f4) AS SELECT connection_id(), pi(), current_user(), version();
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache connection_id() AS `f1`,pi() AS `f2`,current_user() AS `f3`,version() AS `f4`
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select connection_id() AS `f1`,pi() AS `f2`,current_user() AS `f3`,version() AS `f4`
drop view v1;
create table t1 (s1 int);
create table t2 (s2 int);
@@ -787,7 +787,7 @@ create function `f``1` () returns int return 5;
create view v1 as select test.`f``1` ();
show create view v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `test`.`f``1`() AS `test.``f````1`` ()`
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`f``1`() AS `test.``f````1`` ()`
select * from v1;
test.`f``1` ()
5
@@ -1868,14 +1868,14 @@ create table t2 (b timestamp default now());
create view v1 as select a,b,t1.a < now() from t1,t2 where t1.a < now();
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a`,`t2`.`b` AS `b`,(`t1`.`a` < now()) AS `t1.a < now()` from (`t1` join `t2`) where (`t1`.`a` < now())
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a`,`t2`.`b` AS `b`,(`t1`.`a` < now()) AS `t1.a < now()` from (`t1` join `t2`) where (`t1`.`a` < now())
drop view v1;
drop table t1, t2;
CREATE TABLE t1 ( a varchar(50) );
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = CURRENT_USER();
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a` from `t1` where (`t1`.`a` = current_user())
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where (`t1`.`a` = current_user())
DROP VIEW v1;
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = VERSION();
SHOW CREATE VIEW v1;
@@ -1885,7 +1885,7 @@ DROP VIEW v1;
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = DATABASE();
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a` from `t1` where (`t1`.`a` = database())
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where (`t1`.`a` = database())
DROP VIEW v1;
DROP TABLE t1;
CREATE TABLE t1 (col1 time);
@@ -2538,7 +2538,7 @@ show create view v1;
drop view v1;
//
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `test`.`t1`.`id` AS `id` from `t1`
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`t1`.`id` AS `id` from `t1`
create table t1(f1 int, f2 int);
create view v1 as select ta.f1 as a, tb.f1 as b from t1 ta, t1 tb where ta.f1=tb
.f1 and ta.f2=tb.f2;
@@ -2683,7 +2683,7 @@ SELECT (year(now())-year(DOB)) AS Age
FROM t1 HAVING Age < 75;
SHOW CREATE VIEW v1;
View Create View
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache (year(now()) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75)
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select (year(now()) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75)
SELECT (year(now())-year(DOB)) AS Age FROM t1 HAVING Age < 75;
Age
42
@@ -2735,4 +2735,88 @@ m e
4 a
1 b
DROP VIEW v1;
-DROP TABLE IF EXISTS t1,t2;
+DROP TABLE t1,t2;
+CREATE TABLE t1 (a INT NOT NULL, b INT NULL DEFAULT NULL);
+CREATE VIEW v1 AS SELECT a, b FROM t1;
+INSERT INTO v1 (b) VALUES (2);
+Warnings:
+Warning 1423 Field of view 'test.v1' underlying table doesn't have a default value
+SET SQL_MODE = STRICT_ALL_TABLES;
+INSERT INTO v1 (b) VALUES (4);
+ERROR HY000: Field of view 'test.v1' underlying table doesn't have a default value
+SET SQL_MODE = '';
+SELECT * FROM t1;
+a b
+0 2
+DROP VIEW v1;
+DROP TABLE t1;
+CREATE TABLE t1 (firstname text, surname text);
+INSERT INTO t1 VALUES
+("Bart","Simpson"),("Milhouse","van Houten"),("Montgomery","Burns");
+CREATE VIEW v1 AS SELECT CONCAT(firstname," ",surname) AS name FROM t1;
+SELECT CONCAT(LEFT(name,LENGTH(name)-INSTR(REVERSE(name)," ")),
+LEFT(name,LENGTH(name)-INSTR(REVERSE(name)," "))) AS f1
+FROM v1;
+f1
+BartBart
+Milhouse vanMilhouse van
+MontgomeryMontgomery
+DROP VIEW v1;
+DROP TABLE t1;
+CREATE TABLE t1 (i int, j int);
+CREATE VIEW v1 AS SELECT COALESCE(i,j) FROM t1;
+DESCRIBE v1;
+Field Type Null Key Default Extra
+COALESCE(i,j) int(11) YES NULL
+CREATE TABLE t2 SELECT COALESCE(i,j) FROM t1;
+DESCRIBE t2;
+Field Type Null Key Default Extra
+COALESCE(i,j) int(11) YES NULL
+DROP VIEW v1;
+DROP TABLE t1,t2;
+CREATE TABLE t1 (s varchar(10));
+INSERT INTO t1 VALUES ('yadda'), ('yady');
+SELECT TRIM(BOTH 'y' FROM s) FROM t1;
+TRIM(BOTH 'y' FROM s)
+adda
+ad
+CREATE VIEW v1 AS SELECT TRIM(BOTH 'y' FROM s) FROM t1;
+SELECT * FROM v1;
+TRIM(BOTH 'y' FROM s)
+adda
+ad
+DROP VIEW v1;
+SELECT TRIM(LEADING 'y' FROM s) FROM t1;
+TRIM(LEADING 'y' FROM s)
+adda
+ady
+CREATE VIEW v1 AS SELECT TRIM(LEADING 'y' FROM s) FROM t1;
+SELECT * FROM v1;
+TRIM(LEADING 'y' FROM s)
+adda
+ady
+DROP VIEW v1;
+SELECT TRIM(TRAILING 'y' FROM s) FROM t1;
+TRIM(TRAILING 'y' FROM s)
+yadda
+yad
+CREATE VIEW v1 AS SELECT TRIM(TRAILING 'y' FROM s) FROM t1;
+SELECT * FROM v1;
+TRIM(TRAILING 'y' FROM s)
+yadda
+yad
+DROP VIEW v1;
+DROP TABLE t1;
+CREATE TABLE t1 (s1 char);
+INSERT INTO t1 VALUES ('Z');
+CREATE VIEW v1 AS SELECT s1 collate latin1_german1_ci AS col FROM t1;
+CREATE VIEW v2 (col) AS SELECT s1 collate latin1_german1_ci FROM t1;
+INSERT INTO v1 (col) VALUES ('b');
+INSERT INTO v2 (col) VALUES ('c');
+SELECT s1 FROM t1;
+s1
+Z
+b
+c
+DROP VIEW v1, v2;
+DROP TABLE t1;
diff --git a/mysql-test/r/view_grant.result b/mysql-test/r/view_grant.result
index 5f91127d284..30e9d1010ba 100644
--- a/mysql-test/r/view_grant.result
+++ b/mysql-test/r/view_grant.result
@@ -661,3 +661,56 @@ DROP VIEW test2.t3;
DROP TABLE test2.t1, test1.t0;
DROP DATABASE test2;
DROP DATABASE test1;
+DROP VIEW IF EXISTS v1;
+DROP VIEW IF EXISTS v2;
+DROP VIEW IF EXISTS v3;
+DROP FUNCTION IF EXISTS f1;
+DROP FUNCTION IF EXISTS f2;
+DROP PROCEDURE IF EXISTS p1;
+CREATE SQL SECURITY DEFINER VIEW v1 AS SELECT CURRENT_USER() AS cu;
+CREATE FUNCTION f1() RETURNS VARCHAR(77) SQL SECURITY INVOKER
+RETURN CURRENT_USER();
+CREATE SQL SECURITY DEFINER VIEW v2 AS SELECT f1() AS cu;
+CREATE PROCEDURE p1(OUT cu VARCHAR(77)) SQL SECURITY INVOKER
+SET cu= CURRENT_USER();
+CREATE FUNCTION f2() RETURNS VARCHAR(77) SQL SECURITY INVOKER
+BEGIN
+DECLARE cu VARCHAR(77);
+CALL p1(cu);
+RETURN cu;
+END|
+CREATE SQL SECURITY DEFINER VIEW v3 AS SELECT f2() AS cu;
+CREATE USER mysqltest_u1@localhost;
+GRANT ALL ON test.* TO mysqltest_u1@localhost;
+
+The following tests should all return 1.
+
+SELECT CURRENT_USER() = 'mysqltest_u1@localhost';
+CURRENT_USER() = 'mysqltest_u1@localhost'
+1
+SELECT f1() = 'mysqltest_u1@localhost';
+f1() = 'mysqltest_u1@localhost'
+1
+CALL p1(@cu);
+SELECT @cu = 'mysqltest_u1@localhost';
+@cu = 'mysqltest_u1@localhost'
+1
+SELECT f2() = 'mysqltest_u1@localhost';
+f2() = 'mysqltest_u1@localhost'
+1
+SELECT cu = 'root@localhost' FROM v1;
+cu = 'root@localhost'
+1
+SELECT cu = 'root@localhost' FROM v2;
+cu = 'root@localhost'
+1
+SELECT cu = 'root@localhost' FROM v3;
+cu = 'root@localhost'
+1
+DROP VIEW v3;
+DROP FUNCTION f2;
+DROP PROCEDURE p1;
+DROP FUNCTION f1;
+DROP VIEW v2;
+DROP VIEW v1;
+DROP USER mysqltest_u1@localhost;
diff --git a/mysql-test/r/wait_timeout.result b/mysql-test/r/wait_timeout.result
index 683986abf5d..b865a17454d 100644
--- a/mysql-test/r/wait_timeout.result
+++ b/mysql-test/r/wait_timeout.result
@@ -1,3 +1,7 @@
+select 0;
+0
+0
+flush status;
select 1;
1
1
diff --git a/mysql-test/std_data/init_file.dat b/mysql-test/std_data/init_file.dat
index 6105ca2ac1b..814e968eb31 100644
--- a/mysql-test/std_data/init_file.dat
+++ b/mysql-test/std_data/init_file.dat
@@ -1 +1,29 @@
select * from mysql.user as t1, mysql.user as t2, mysql.user as t3;
+use test;
+
+drop table if exists t1;
+create table t1 (x int);
+drop table if exists t2;
+create table t2 (y int);
+
+drop procedure if exists p1;
+create definer=root@localhost procedure p1() select * from t1;
+call p1();
+drop procedure p1;
+
+create definer=root@localhost procedure p1() insert into t1 values (3),(5),(7);
+call p1();
+
+drop function if exists f1;
+create definer=root@localhost function f1() returns int return (select count(*) from t1);
+insert into t2 set y = f1()*10;
+
+drop view if exists v1;
+create definer=root@localhost view v1 as select f1();
+insert into t2 (y) select * from v1;
+
+create trigger trg1 after insert on t2 for each row insert into t1 values (new.y);
+insert into t2 values (11), (13);
+drop procedure p1;
+drop function f1;
+drop view v1;
diff --git a/mysql-test/t/auto_increment.test b/mysql-test/t/auto_increment.test
index 2674639d0ac..7cef1bad784 100644
--- a/mysql-test/t/auto_increment.test
+++ b/mysql-test/t/auto_increment.test
@@ -303,3 +303,50 @@ INSERT INTO t1 VALUES(1, 1);
--error ER_DUP_ENTRY
ALTER TABLE t1 CHANGE t1 t1 INT(10) auto_increment;
DROP TABLE t1;
+
+# Fix for BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY
+# UPDATE": now LAST_INSERT_ID() will return the id of the updated
+# row.
+CREATE TABLE `t2` (
+ `k` int(11) NOT NULL auto_increment,
+ `a` int(11) default NULL,
+ `c` int(11) default NULL,
+ PRIMARY KEY (`k`),
+ UNIQUE KEY `idx_1` (`a`)
+) ENGINE=InnoDB;
+ insert into t2 ( a ) values ( 6 ) on duplicate key update c =
+ifnull( c,
+0 ) + 1;
+insert into t2 ( a ) values ( 7 ) on duplicate key update c =
+ifnull( c,
+0 ) + 1;
+select last_insert_id();
+select * from t2;
+insert into t2 ( a ) values ( 6 ) on duplicate key update c =
+ifnull( c,
+0 ) + 1;
+select last_insert_id();
+select * from t2;
+
+# Test of LAST_INSERT_ID() when the autogenerated value will fail:
+# last_insert_id() should not change
+insert ignore into t2 values (null,6,1),(10,8,1);
+select last_insert_id();
+# First and second autogenerated will fail, last_insert_id() should
+# point to third
+insert ignore into t2 values (null,6,1),(null,8,1),(null,15,1),(null,20,1);
+select last_insert_id();
+select * from t2;
+
+drop table t2;
+
+# Test of REPLACE when it does INSERT+DELETE and not UPDATE:
+# see if it sets LAST_INSERT_ID() ok
+create table t1 (a int primary key auto_increment, b int, c int, d timestamp default current_timestamp, unique(b),unique(c));
+insert into t1 values(null,1,1,now());
+insert into t1 values(null,0,0,null);
+# this will delete two rows
+replace into t1 values(null,1,0,null);
+select last_insert_id();
+
+drop table t1;
diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test
index dbf5559943a..ebee341907c 100644
--- a/mysql-test/t/bdb.test
+++ b/mysql-test/t/bdb.test
@@ -1045,6 +1045,7 @@ commit;
alter table t1 add primary key(a);
drop table t1;
+
--echo End of 5.0 tests
#
diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test
index 714e3844e66..140cdccc218 100644
--- a/mysql-test/t/create.test
+++ b/mysql-test/t/create.test
@@ -521,7 +521,7 @@ DROP TABLE t12913;
create database mysqltest;
use mysqltest;
drop database mysqltest;
---error 1102
+--error ER_NO_DB_ERROR
create table test.t1 like x;
--disable_warnings
drop table if exists test.t1;
@@ -670,6 +670,7 @@ alter table t1 max_rows=100000000000;
show create table t1;
drop table t1;
+
# End of 5.0 tests
#
diff --git a/mysql-test/t/create_not_windows.test b/mysql-test/t/create_not_windows.test
index 71ad9ccd7fe..2c1700d9e49 100644
--- a/mysql-test/t/create_not_windows.test
+++ b/mysql-test/t/create_not_windows.test
@@ -18,3 +18,24 @@ show create table `about:text`;
drop table `about:text`;
# End of 5.0 tests
+
+#
+# Bug#16532: mysql server assert in debug if table def is removed
+#
+use test;
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+create table t1(a int) engine=myisam;
+insert into t1 values(1);
+--system rm -f $MYSQLTEST_VARDIR/master-data/test/t1.frm
+--echo "We get an error because the table is in the definition cache"
+--error ER_TABLE_EXISTS_ERROR
+create table t1(a int, b int);
+--echo "Flush the cache and recreate the table anew to be able to drop it"
+flush tables;
+show open tables like "t%";
+create table t1(a int, b int, c int);
+--echo "Try to select from the table. This should not crash the server"
+select count(a) from t1;
+drop table t1;
diff --git a/mysql-test/t/ctype_ucs2_def-master.opt b/mysql-test/t/ctype_ucs2_def-master.opt
index 1f884ff1d67..a0b5b061860 100644
--- a/mysql-test/t/ctype_ucs2_def-master.opt
+++ b/mysql-test/t/ctype_ucs2_def-master.opt
@@ -1 +1 @@
---default-character-set=ucs2 --default-collation=ucs2_unicode_ci
+--default-collation=ucs2_unicode_ci --default-character-set=ucs2
diff --git a/mysql-test/t/ctype_ucs2_def.test b/mysql-test/t/ctype_ucs2_def.test
index fb174d551cf..00f636d79dc 100644
--- a/mysql-test/t/ctype_ucs2_def.test
+++ b/mysql-test/t/ctype_ucs2_def.test
@@ -1,4 +1,9 @@
#
+# MySQL Bug#15276: MySQL ignores collation-server
+#
+show variables like 'collation_server';
+
+#
# Bug#18004 Connecting crashes server when default charset is UCS2
#
show variables like "%character_set_ser%";
diff --git a/mysql-test/t/date_formats.test b/mysql-test/t/date_formats.test
index 39a530ba733..6898cd5802d 100644
--- a/mysql-test/t/date_formats.test
+++ b/mysql-test/t/date_formats.test
@@ -265,6 +265,20 @@ select str_to_date("2003-04-05 g", "%Y-%m-%d") as f1,
--enable_ps_protocol
#
+# Test of locale dependent date format (WL#2928 Date Translation NRE)
+#
+set names latin1;
+select date_format('2004-01-01','%W (%a), %e %M (%b) %Y');
+set lc_time_names=ru_RU;
+set names koi8r;
+select date_format('2004-01-01','%W (%a), %e %M (%b) %Y');
+set lc_time_names=de_DE;
+set names latin1;
+select date_format('2004-01-01','%W (%a), %e %M (%b) %Y');
+set names latin1;
+set lc_time_names=en_US;
+
+#
# Bug #14016
#
create table t1 (f1 datetime);
@@ -279,7 +293,6 @@ drop table t1;
select str_to_date( 1, NULL );
select str_to_date( NULL, 1 );
select str_to_date( 1, IF(1=1,NULL,NULL) );
-# End of 4.1 tests
#
# Bug#11326
@@ -310,3 +323,10 @@ SELECT TIME_FORMAT("12:00:00", '%l %p');
SELECT TIME_FORMAT("23:00:00", '%l %p');
SELECT TIME_FORMAT("24:00:00", '%l %p');
SELECT TIME_FORMAT("25:00:00", '%l %p');
+
+#
+# Bug#20729: Bad date_format() call makes mysql server crash
+#
+SELECT DATE_FORMAT('%Y-%m-%d %H:%i:%s', 1151414896);
+
+--echo "End of 4.1 tests"
diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def
index 6f26847f8d7..8030aecc1b7 100644
--- a/mysql-test/t/disabled.def
+++ b/mysql-test/t/disabled.def
@@ -14,14 +14,13 @@
#events : BUG#17619 2006-02-21 andrey Race conditions
#events_scheduling : BUG#19170 2006-04-26 andrey Test case of 19170 fails on some platforms. Has to be checked.
#im_instance_conf : Bug#20294 2006-06-06 monty Instance manager test im_instance_conf fails randomly
-#im_options : Bug#20294 2006-06-06 monty Instance manager test im_instance_conf fails randomly
+im_options : Bug#20294 2006-07-24 stewart Instance manager test im_instance_conf fails randomly
#im_life_cycle : Bug#20368 2006-06-10 alik im_life_cycle test fails
ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
-#ndb_binlog_discover : BUG#19395 2006-04-28 tomas/knielsen mysqld does not always detect cluster shutdown
-#ndb_cache2 : BUG#18597 2006-03-28 brian simultaneous drop table and ndb statistics update triggers node failure
-#ndb_cache_multi2 : BUG#18597 2006-04-10 kent simultaneous drop table and ndb statistics update triggers node failure
+ndb_binlog_ignore_db : BUG#21279 2006-07-25 ingo Randomly throws a warning
ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed
+ndb_restore_compat : BUG#21283 2006-07-26 ingo Test fails randomly
partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not obtain LOCK_open
rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not replicated
@@ -32,7 +31,6 @@ rpl_ndb_ddl : BUG#18946 result file needs update + test needs to ch
rpl_ndb_innodb2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement
#rpl_ndb_log : BUG#18947 2006-03-21 tomas CRBR: order in binlog of create table and insert (on different table) not determ
rpl_ndb_myisam2ndb : Bug #19710 Cluster replication to partition table fails on DELETE FROM statement
-rpl_switch_stm_row_mixed : BUG#18590 2006-03-28 brian
rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly
rpl_row_func003 : BUG#19074 2006-13-04 andrei test failed
rpl_sp : BUG#16456 2006-02-16 jmiller
@@ -40,3 +38,8 @@ rpl_sp_effects : BUG#19862 2006-06-15 mkindahl
# the below testcase have been reworked to avoid the bug, test contains comment, keep bug open
#ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events
+rpl_ndb_idempotent : BUG#21298 2006-07-27 msvensson
+rpl_row_basic_7ndb : BUG#21298 2006-07-27 msvensson
+rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson
+
+rpl_ndb_dd_advance : BUG#18679 2006-07-28 jimw (Test fails randomly)
diff --git a/mysql-test/t/events_logs_tests.test b/mysql-test/t/events_logs_tests.test
index 5f4ec852cd3..5c252b1174b 100644
--- a/mysql-test/t/events_logs_tests.test
+++ b/mysql-test/t/events_logs_tests.test
@@ -69,8 +69,8 @@ SELECT user_host, query_time, db, sql_text FROM mysql.slow_log;
SET SESSION long_query_time=300;
--echo "Make it quite long"
TRUNCATE mysql.slow_log;
-SET SESSION long_query_time=1;
CREATE TABLE slow_event_test (slo_val tinyint, val tinyint);
+SET SESSION long_query_time=1;
--echo "This won't go to the slow log"
CREATE EVENT long_event ON SCHEDULE EVERY 1 MINUTE DO INSERT INTO slow_event_test SELECT @@long_query_time, SLEEP(3);
SELECT * FROM slow_event_test;
diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test
index 5f5c8d44f35..032e2536f0a 100644
--- a/mysql-test/t/federated.test
+++ b/mysql-test/t/federated.test
@@ -1256,6 +1256,10 @@ SELECT LAST_INSERT_ID();
INSERT INTO federated.t1 VALUES ();
SELECT LAST_INSERT_ID();
SELECT * FROM federated.t1;
+DROP TABLE federated.t1;
+
+connection slave;
+DROP TABLE federated.t1;
#
# Bug#17377 Federated Engine returns wrong Data, always the rows
@@ -1310,7 +1314,6 @@ select * from federated.t1 where fld_parentid=0 and fld_delt=0;
DROP TABLE federated.t1;
connection slave;
DROP TABLE federated.bug_17377_table;
-DROP TABLE federated.t1;
#
# Test multi updates and deletes without keys
@@ -1363,4 +1366,120 @@ drop table federated.t1, federated.t2;
connection master;
--enable_parsing
---source include/federated_cleanup.inc
+#
+# Additional test for bug#18437 "Wrong values inserted with a before
+# update trigger on NDB table". SQL-layer didn't properly inform
+# handler about fields which were read and set in triggers. In some
+# cases this resulted in incorrect (garbage) values of OLD variables
+# and lost changes to NEW variables.
+# Since for the federated engine the only operation affected by wrong
+# fields mark-up is handler::write_row(), this file contains coverage
+# for ON INSERT triggers only. Tests for other types of triggers reside
+# in ndb_trigger.test.
+#
+connection slave;
+--disable_warnings
+drop table if exists federated.t1;
+--enable_warnings
+create table federated.t1 (a int, b int, c int);
+connection master;
+--disable_warnings
+drop table if exists federated.t1;
+drop table if exists federated.t2;
+--enable_warnings
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b;
+create table federated.t2 (a int, b int);
+insert into federated.t2 values (13, 17), (19, 23);
+# Each of three statements should correctly set values for all three fields
+# insert
+insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
+select * from federated.t1 order by a;
+delete from federated.t1;
+# insert ... select
+insert into federated.t1 (a, b) select * from federated.t2;
+select * from federated.t1 order by a;
+delete from federated.t1;
+# load
+load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
+select * from federated.t1 order by a;
+drop tables federated.t1, federated.t2;
+
+connection slave;
+drop table federated.t1;
+
+#
+# BUG 19773 Crash when using multi-table updates, deletes
+# with federated tables
+#
+connection slave;
+create table federated.t1 (i1 int, i2 int, i3 int);
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20));
+
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+select * from federated.t2;
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+drop table federated.t1, federated.t2;
+connection slave;
+drop table federated.t1, federated.t2;
+
+# Test multi updates and deletes with keys
+connection slave;
+create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1));
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id));
+
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
+insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
+insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id;
+select * from federated.t1 order by i1;
+select * from federated.t2 order by id;
+drop table federated.t1, federated.t2;
+
+connection slave;
+drop table federated.t1, federated.t2;
+#
+# Bug #16494: Updates that set a column to NULL fail sometimes
+#
+connection slave;
+create table t1 (id int not null auto_increment primary key, val int);
+connection master;
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval create table t1
+ (id int not null auto_increment primary key, val int) engine=federated
+ connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
+insert into t1 values (1,0),(2,0);
+update t1 set val = NULL where id = 1;
+select * from t1;
+connection slave;
+select * from t1;
+drop table t1;
+connection master;
+drop table t1;
+
+--echo End of 5.0 tests
+
+source include/federated_cleanup.inc;
+
diff --git a/mysql-test/t/func_gconcat.test b/mysql-test/t/func_gconcat.test
index 7fd7edddf28..7a0d90961e4 100644
--- a/mysql-test/t/func_gconcat.test
+++ b/mysql-test/t/func_gconcat.test
@@ -433,3 +433,17 @@ create table t1 (c1 varchar(10), c2 int);
select charset(group_concat(c1 order by c2)) from t1;
drop table t1;
+#
+# Bug #16712: group_concat returns odd string instead of intended result
+#
+CREATE TABLE t1 (a INT(10), b LONGTEXT, PRIMARY KEY (a));
+
+SET GROUP_CONCAT_MAX_LEN = 20000000;
+
+INSERT INTO t1 VALUES (1,REPEAT(CONCAT('A',CAST(CHAR(0) AS BINARY),'B'), 40000));
+INSERT INTO t1 SELECT a + 1, b FROM t1;
+
+SELECT a, CHAR_LENGTH(b) FROM t1;
+SELECT CHAR_LENGTH( GROUP_CONCAT(b) ) FROM t1;
+SET GROUP_CONCAT_MAX_LEN = 1024;
+DROP TABLE t1;
diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test
index e8c5fa18a25..2806ffb5ae0 100644
--- a/mysql-test/t/func_group.test
+++ b/mysql-test/t/func_group.test
@@ -378,13 +378,15 @@ explain
select concat(min(t1.a1),min(t2.a4)) from t1, t2 where t2.a4 <> 'AME';
drop table t1, t2;
---disable_warnings
-create table t1 (USR_ID integer not null, MAX_REQ integer not null, constraint PK_SEA_USER primary key (USR_ID)) engine=InnoDB;
---enable_warnings
-insert into t1 values (1, 3);
-select count(*) + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ from t1 group by MAX_REQ;
-select Case When Count(*) < MAX_REQ Then 1 Else 0 End from t1 where t1.USR_ID = 1 group by MAX_REQ;
-drop table t1;
+# Moved to func_group_innodb
+#--disable_warnings
+#create table t1 (USR_ID integer not null, MAX_REQ integer not null, constraint PK_SEA_USER primary key (USR_ID)) engine=InnoDB;
+#--enable_warnings
+#insert into t1 values (1, 3);
+#select count(*) + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ from t1 group by MAX_REQ;
+#select Case When Count(*) < MAX_REQ Then 1 Else 0 End from t1 where t1.USR_ID = 1 group by MAX_REQ;
+#drop table t1;
+
create table t1 (a char(10));
insert into t1 values ('a'),('b'),('c');
@@ -539,6 +541,11 @@ INSERT INTO t1 VALUES
SELECT MAX(id) FROM t1 WHERE id < 3 AND a=2 AND b=6;
DROP TABLE t1;
+
+#
+# Bug #12882 min/max inconsistent on empty table
+#
+# Test case moved to func_group_innodb
#
# Bug #18206: min/max optimization cannot be applied to partial index
#
@@ -660,3 +667,18 @@ SELECT SUM(a) FROM t1 GROUP BY b/c;
DROP TABLE t1;
set div_precision_increment= @sav_dpi;
+#
+# Bug #20868: Client connection is broken on SQL query error
+#
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT);
+INSERT INTO t1 VALUES (1,1), (2,2);
+
+CREATE TABLE t2 (a INT PRIMARY KEY, b INT);
+INSERT INTO t2 VALUES (1,1), (3,3);
+
+SELECT SQL_NO_CACHE
+ (SELECT SUM(c.a) FROM t1 ttt, t2 ccc
+ WHERE ttt.a = ccc.b AND ttt.a = t.a GROUP BY ttt.a) AS minid
+FROM t1 t, t2 c WHERE t.a = c.b;
+
+DROP TABLE t1,t2;
diff --git a/mysql-test/t/func_group_innodb.test b/mysql-test/t/func_group_innodb.test
new file mode 100644
index 00000000000..1bdfd8f54bb
--- /dev/null
+++ b/mysql-test/t/func_group_innodb.test
@@ -0,0 +1,85 @@
+#
+# Test of group functions that depend on innodb
+#
+
+--source include/have_innodb.inc
+
+--disable_warnings
+create table t1 (USR_ID integer not null, MAX_REQ integer not null, constraint PK_SEA_USER primary key (USR_ID)) engine=InnoDB;
+--enable_warnings
+insert into t1 values (1, 3);
+select count(*) + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ from t1 group by MAX_REQ;
+select Case When Count(*) < MAX_REQ Then 1 Else 0 End from t1 where t1.USR_ID = 1 group by MAX_REQ;
+drop table t1;
+
+
+#
+# Bug #12882 min/max inconsistent on empty table
+#
+
+--disable_warnings
+create table t1m (a int) engine=myisam;
+create table t1i (a int) engine=innodb;
+create table t2m (a int) engine=myisam;
+create table t2i (a int) engine=innodb;
+--enable_warnings
+insert into t2m values (5);
+insert into t2i values (5);
+
+# test with MyISAM
+select min(a) from t1m;
+select min(7) from t1m;
+select min(7) from DUAL;
+explain select min(7) from t2m join t1m;
+select min(7) from t2m join t1m;
+
+select max(a) from t1m;
+select max(7) from t1m;
+select max(7) from DUAL;
+explain select max(7) from t2m join t1m;
+select max(7) from t2m join t1m;
+
+select 1, min(a) from t1m where a=99;
+select 1, min(a) from t1m where 1=99;
+select 1, min(1) from t1m where a=99;
+select 1, min(1) from t1m where 1=99;
+
+select 1, max(a) from t1m where a=99;
+select 1, max(a) from t1m where 1=99;
+select 1, max(1) from t1m where a=99;
+select 1, max(1) from t1m where 1=99;
+
+# test with InnoDB
+select min(a) from t1i;
+select min(7) from t1i;
+select min(7) from DUAL;
+explain select min(7) from t2i join t1i;
+select min(7) from t2i join t1i;
+
+select max(a) from t1i;
+select max(7) from t1i;
+select max(7) from DUAL;
+explain select max(7) from t2i join t1i;
+select max(7) from t2i join t1i;
+
+select 1, min(a) from t1i where a=99;
+select 1, min(a) from t1i where 1=99;
+select 1, min(1) from t1i where a=99;
+select 1, min(1) from t1i where 1=99;
+
+select 1, max(a) from t1i where a=99;
+select 1, max(a) from t1i where 1=99;
+select 1, max(1) from t1i where a=99;
+select 1, max(1) from t1i where 1=99;
+
+# mixed MyISAM/InnoDB test
+explain select count(*), min(7), max(7) from t1m, t1i;
+select count(*), min(7), max(7) from t1m, t1i;
+
+explain select count(*), min(7), max(7) from t1m, t2i;
+select count(*), min(7), max(7) from t1m, t2i;
+
+explain select count(*), min(7), max(7) from t2m, t1i;
+select count(*), min(7), max(7) from t2m, t1i;
+
+drop table t1m, t1i, t2m, t2i;
diff --git a/mysql-test/t/func_sapdb.test b/mysql-test/t/func_sapdb.test
index 6189712b5fe..97101fba615 100644
--- a/mysql-test/t/func_sapdb.test
+++ b/mysql-test/t/func_sapdb.test
@@ -43,6 +43,8 @@ select weekofyear("1997-11-30 23:59:59.000001");
select makedate(1997,1);
select makedate(1997,0);
+select makedate(9999,365);
+select makedate(9999,366);
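+# (Illustrative only, not part of the original change: MAKEDATE(year, dayofyear)
+# maps a day-of-year number onto a date, e.g. day 32 of 2006 is '2006-02-01'.)
+select makedate(2006,32);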
#Time functions
diff --git a/mysql-test/t/func_str.test b/mysql-test/t/func_str.test
index b13fe039261..8753db0ebe1 100644
--- a/mysql-test/t/func_str.test
+++ b/mysql-test/t/func_str.test
@@ -19,6 +19,11 @@ select hex(char(256));
select locate('he','hello'),locate('he','hello',2),locate('lo','hello',2) ;
select instr('hello','HE'), instr('hello',binary 'HE'), instr(binary 'hello','HE');
select position(binary 'll' in 'hello'),position('a' in binary 'hello');
+#
+# Bug#11728 string function LEFT,
+# strange undocumented behaviour, strict mode
+#
+select left('hello',null), right('hello',null);
select left('hello',2),right('hello',2),substring('hello',2,2),mid('hello',1,5) ;
select concat('',left(right(concat('what ',concat('is ','happening')),9),4),'',substring('monty',5,1)) ;
select substring_index('www.tcx.se','.',-2),substring_index('www.tcx.se','.',1);
@@ -685,6 +690,38 @@ select * from t1 where f1='test' and (f2= sha("test") or f2= sha("TEST"));
select * from t1 where f1='test' and (f2= sha("TEST") or f2= sha("test"));
drop table t1;
+#
+# Bug#18243: REVERSE changes its argument
+#
+
+CREATE TABLE t1 (a varchar(10));
+INSERT INTO t1 VALUES ('abc'), ('xyz');
+
+SELECT a, CONCAT(a,' ',a) AS c FROM t1
+ HAVING LEFT(c,LENGTH(c)-INSTR(REVERSE(c)," ")) = a;
+
+SELECT a, CONCAT(a,' ',a) AS c FROM t1
+ HAVING LEFT(CONCAT(a,' ',a),
+ LENGTH(CONCAT(a,' ',a))-
+ INSTR(REVERSE(CONCAT(a,' ',a))," ")) = a;
+
+DROP TABLE t1;
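+# (Illustrative sketch, not part of the original patch: the HAVING expression
+# above extracts everything before the last space, e.g. for the literal 'abc abc'.)
+SELECT LEFT('abc abc', LENGTH('abc abc') - INSTR(REVERSE('abc abc'), ' '));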
+
+#
+# Bug#17526: WRONG PRINT for TRIM FUNCTION with two arguments
+#
+
+CREATE TABLE t1 (s varchar(10));
+INSERT INTO t1 VALUES ('yadda'), ('yaddy');
+
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM(s) > 'ab';
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM('y' FROM s) > 'ab';
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM(LEADING 'y' FROM s) > 'ab';
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM(TRAILING 'y' FROM s) > 'ab';
+EXPLAIN EXTENDED SELECT s FROM t1 WHERE TRIM(BOTH 'y' FROM s) > 'ab';
+
+DROP TABLE t1;
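+# (Illustrative sketch, not part of the original patch: the re-printed query that
+# EXPLAIN EXTENDED produces can be inspected via SHOW WARNINGS, e.g. on a literal.)
+EXPLAIN EXTENDED SELECT TRIM(LEADING 'y' FROM 'yyabcyy');
+SHOW WARNINGS;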
+
--echo End of 4.1 tests
#
diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test
index 355b64e0405..042ad178fc8 100644
--- a/mysql-test/t/func_time.test
+++ b/mysql-test/t/func_time.test
@@ -5,6 +5,9 @@
drop table if exists t1,t2,t3;
--enable_warnings
+# Set timezone to GMT-3, to make it possible to use "interval 3 hour"
+set time_zone="+03:00";
+
select from_days(to_days("960101")),to_days(960201)-to_days("19960101"),to_days(date_add(curdate(), interval 1 day))-to_days(curdate()),weekday("1997-11-29");
select period_add("9602",-12),period_diff(199505,"9404") ;
@@ -155,7 +158,11 @@ SELECT EXTRACT(QUARTER FROM '2004-09-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-10-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-11-15') AS quarter;
SELECT EXTRACT(QUARTER FROM '2004-12-15') AS quarter;
-
+#
+# MySQL Bugs: #12356: DATE_SUB or DATE_ADD incorrectly returns null
+#
+SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
+SELECT DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
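+# (Illustrative only, not part of the original change: stepping past the supported
+# DATETIME range is expected to return NULL rather than an error, e.g.:)
+SELECT DATE_ADD('9999-12-31 23:59:00', INTERVAL 1 MINUTE);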
#
# Test big intervals (Bug #3498)
@@ -335,6 +342,7 @@ select last_day("1997-12-1")+0.0;
# Test SAPDB UTC_% functions. This part is TZ dependent (it is assumed that the
# TZ variable is set to GMT-3)
+
select strcmp(date_sub(localtimestamp(), interval 3 hour), utc_timestamp())=0;
select strcmp(date_format(date_sub(localtimestamp(), interval 3 hour),"%T"), utc_time())=0;
select strcmp(date_format(date_sub(localtimestamp(), interval 3 hour),"%Y-%m-%d"), utc_date())=0;
@@ -505,6 +513,9 @@ SELECT * FROM t1, t2
DROP TABLE t1,t2;
+
+# Restore timezone to default
+set time_zone= @@global.time_zone;
--echo End of 5.0 tests
#
diff --git a/mysql-test/t/func_timestamp.test b/mysql-test/t/func_timestamp.test
index e1bb7e878ee..05a91b06d28 100644
--- a/mysql-test/t/func_timestamp.test
+++ b/mysql-test/t/func_timestamp.test
@@ -6,6 +6,9 @@
drop table if exists t1;
--enable_warnings
+# Set timezone to GMT-3, to make it possible to use "interval 3 hour"
+set time_zone="+03:00";
+
create table t1 (Zeit time, Tag tinyint not null, Monat tinyint not null,
Jahr smallint not null, index(Tag), index(Monat), index(Jahr) );
insert into t1 values ("09:26:00",16,9,1998),("09:26:00",16,9,1998);
@@ -15,3 +18,6 @@ FROM t1;
drop table t1;
# End of 4.1 tests
+
+# Restore timezone to default
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/t/gis-rtree.test b/mysql-test/t/gis-rtree.test
index 02e45861706..163f2806ad2 100644
--- a/mysql-test/t/gis-rtree.test
+++ b/mysql-test/t/gis-rtree.test
@@ -187,4 +187,48 @@ check table t1 extended;
drop table t1;
+#
+# Bug#17877 - Corrupted spatial index
+#
+CREATE TABLE t1 (
+ c1 geometry NOT NULL default '',
+ SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+ PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+ -18.6055555000 -66.8158332999,
+ -18.7186111000 -66.8102777000,
+ -18.7211111000 -66.9269443999,
+ -18.6086111000 -66.9327777000))'));
+# This showed a missing key.
+CHECK TABLE t1 EXTENDED;
+DROP TABLE t1;
+#
+CREATE TABLE t1 (
+ c1 geometry NOT NULL default '',
+ SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+ PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+ -18.6055555000 -66.8158332999,
+ -18.7186111000 -66.8102777000,
+ -18.7211111000 -66.9269443999,
+ -18.6086111000 -66.9327777000))'));
+INSERT INTO t1 (c1) VALUES (
+ PolygonFromText('POLYGON((-65.7402776999 -96.6686111000,
+ -65.7372222000 -96.5516666000,
+ -65.8502777000 -96.5461111000,
+ -65.8527777000 -96.6627777000,
+ -65.7402776999 -96.6686111000))'));
+# This is the same as the first insert to get a non-unique key.
+INSERT INTO t1 (c1) VALUES (
+ PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+ -18.6055555000 -66.8158332999,
+ -18.7186111000 -66.8102777000,
+ -18.7211111000 -66.9269443999,
+ -18.6086111000 -66.9327777000))'));
+# This showed (and still shows) OK.
+CHECK TABLE t1 EXTENDED;
+DROP TABLE t1;
+
# End of 4.1 tests
diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test
index bb3f621d194..4c6ff9b2fe7 100644
--- a/mysql-test/t/gis.test
+++ b/mysql-test/t/gis.test
@@ -409,3 +409,10 @@ create table t1(pt GEOMETRY);
alter table t1 add primary key pti(pt);
alter table t1 add primary key pti(pt(20));
drop table t1;
+
+--enable_metadata
+create table t1 (g GEOMETRY);
+select * from t1;
+select asbinary(g) from t1;
+--disable_metadata
+drop table t1;
diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test
index fb9835c5d7f..ce1e4e59600 100644
--- a/mysql-test/t/group_by.test
+++ b/mysql-test/t/group_by.test
@@ -632,3 +632,26 @@ group by t1.c1;
show warnings;
drop table t1, t2;
+#
+# Bug #20466: a view is mixing data when there's a trigger on the table
+#
+CREATE TABLE t1 (a tinyint(3), b varchar(255), PRIMARY KEY (a));
+
+INSERT INTO t1 VALUES (1,'-----'), (6,'Allemagne'), (17,'Autriche'),
+ (25,'Belgique'), (54,'Danemark'), (62,'Espagne'), (68,'France');
+
+CREATE TABLE t2 (a tinyint(3), b tinyint(3), PRIMARY KEY (a), KEY b (b));
+
+INSERT INTO t2 VALUES (1,1), (2,1), (6,6), (18,17), (15,25), (16,25),
+ (17,25), (10,54), (5,62),(3,68);
+
+CREATE VIEW v1 AS select t1.a, concat(t1.b,'') AS b, t1.b as real_b from t1;
+
+explain
+SELECT straight_join sql_no_cache v1.a, v1.b, v1.real_b from t2, v1
+where t2.b=v1.a GROUP BY t2.b;
+SELECT straight_join sql_no_cache v1.a, v1.b, v1.real_b from t2, v1
+where t2.b=v1.a GROUP BY t2.b;
+
+DROP VIEW v1;
+DROP TABLE t1,t2;
diff --git a/mysql-test/t/group_min_max.test b/mysql-test/t/group_min_max.test
index 8ba681d8e5f..0354fc465af 100644
--- a/mysql-test/t/group_min_max.test
+++ b/mysql-test/t/group_min_max.test
@@ -659,7 +659,32 @@ select a1 from t1 where a2 = 'b' group by a1;
explain select distinct a1 from t1 where a2 = 'b';
select distinct a1 from t1 where a2 = 'b';
+#
+# Bug #12672: primary key implicitly included in every innodb index
+#
+# Test case moved to group_min_max_innodb
+
+
+#
+# Bug #6142: a problem with the empty innodb table
+#
+# Test case moved to group_min_max_innodb
+
+
+#
+# Bug #9798: group by with rollup
+#
+# Test case moved to group_min_max_innodb
+
+
+#
+# Bug #13293 Wrongly used index results in endless loop.
+#
+# Test case moved to group_min_max_innodb
+
+
drop table t1,t2,t3;
+
#
# Bug #14920 Ordering aggregated result sets with composite primary keys
# corrupts resultset
@@ -746,3 +771,51 @@ EXPLAIN SELECT DISTINCT a,a FROM t1 ORDER BY a;
SELECT DISTINCT a,a FROM t1 ORDER BY a;
DROP TABLE t1;
+
+#
+# Bug #21007: NATURAL JOIN (any JOIN (2 x NATURAL JOIN)) crashes the server
+#
+
+CREATE TABLE t1 (id1 INT, id2 INT);
+CREATE TABLE t2 (id2 INT, id3 INT, id5 INT);
+CREATE TABLE t3 (id3 INT, id4 INT);
+CREATE TABLE t4 (id4 INT);
+CREATE TABLE t5 (id5 INT, id6 INT);
+CREATE TABLE t6 (id6 INT);
+
+INSERT INTO t1 VALUES(1,1);
+INSERT INTO t2 VALUES(1,1,1);
+INSERT INTO t3 VALUES(1,1);
+INSERT INTO t4 VALUES(1);
+INSERT INTO t5 VALUES(1,1);
+INSERT INTO t6 VALUES(1);
+
+-- original bug query
+SELECT * FROM
+t1
+ NATURAL JOIN
+(t2 JOIN (t3 NATURAL JOIN t4, t5 NATURAL JOIN t6)
+ ON (t3.id3 = t2.id3 AND t5.id5 = t2.id5));
+
+-- inner join swapped
+SELECT * FROM
+t1
+ NATURAL JOIN
+(((t3 NATURAL JOIN t4) join (t5 NATURAL JOIN t6) on t3.id4 = t5.id5) JOIN t2
+ ON (t3.id3 = t2.id3 AND t5.id5 = t2.id5));
+
+-- one join less, no ON cond
+SELECT * FROM t1 NATURAL JOIN ((t3 join (t5 NATURAL JOIN t6)) JOIN t2);
+
+-- wrong error message: 'id2' - ambiguous column
+SELECT * FROM
+(t2 JOIN (t3 NATURAL JOIN t4, t5 NATURAL JOIN t6)
+ ON (t3.id3 = t2.id3 AND t5.id5 = t2.id5))
+ NATURAL JOIN
+t1;
+SELECT * FROM
+(t2 JOIN ((t3 NATURAL JOIN t4) join (t5 NATURAL JOIN t6)))
+ NATURAL JOIN
+t1;
+
+DROP TABLE t1,t2,t3,t4,t5,t6;
diff --git a/mysql-test/t/group_min_max_innodb.test b/mysql-test/t/group_min_max_innodb.test
new file mode 100644
index 00000000000..ea2a603a8a4
--- /dev/null
+++ b/mysql-test/t/group_min_max_innodb.test
@@ -0,0 +1,95 @@
+#
+# Test file for WL#1724 (Min/Max Optimization for Queries with Group By Clause).
+# The queries in this file test query execution via QUICK_GROUP_MIN_MAX_SELECT
+# that depends on InnoDB
+#
+
+--source include/have_innodb.inc
+
+#
+# Bug #12672: primary key implicitly included in every innodb index
+#
+
+--disable_warnings
+create table t4 (
+ pk_col int auto_increment primary key, a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(64) default ' '
+) engine=innodb;
+--enable_warnings
+
+insert into t4 (a1, a2, b, c, d) values
+('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'),
+('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'),
+('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'),
+('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'),
+('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'),
+('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'),
+('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'),
+('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'),
+('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'),
+('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'),
+('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'),
+('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'),
+('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'),
+('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'),
+('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'),
+('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4'),
+('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'),
+('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'),
+('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'),
+('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'),
+('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'),
+('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'),
+('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'),
+('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'),
+('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'),
+('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'),
+('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'),
+('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'),
+('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'),
+('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'),
+('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'),
+('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4');
+
+create index idx12672_0 on t4 (a1);
+create index idx12672_1 on t4 (a1,a2,b,c);
+create index idx12672_2 on t4 (a1,a2,b);
+analyze table t4;
+
+select distinct a1 from t4 where pk_col not in (1,2,3,4);
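+# (Illustrative only, not part of the original patch: EXPLAIN can be used to check
+# whether the loose index scan ("Using index for group-by") is chosen here.)
+explain select distinct a1 from t4 where pk_col not in (1,2,3,4);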
+
+drop table t4;
+
+
+#
+# Bug #6142: a problem with the empty innodb table
+#
+
+--disable_warnings
+create table t1 (
+ a varchar(30), b varchar(30), primary key(a), key(b)
+) engine=innodb;
+--enable_warnings
+select distinct a from t1;
+drop table t1;
+
+#
+# Bug #9798: group by with rollup
+#
+
+--disable_warnings
+create table t1(a int, key(a)) engine=innodb;
+--enable_warnings
+insert into t1 values(1);
+select a, count(a) from t1 group by a with rollup;
+drop table t1;
+
+
+#
+# Bug #13293 Wrongly used index results in endless loop.
+#
+create table t1 (f1 int, f2 char(1), primary key(f1,f2)) engine=innodb;
+insert into t1 values ( 1,"e"),(2,"a"),( 3,"c"),(4,"d");
+alter table t1 drop primary key, add primary key (f2, f1);
+explain select distinct f1 a, f1 b from t1;
+explain select distinct f1, f2 from t1;
+drop table t1;
diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test
index 73aea01dfa2..cfc51376e25 100644
--- a/mysql-test/t/information_schema.test
+++ b/mysql-test/t/information_schema.test
@@ -859,7 +859,83 @@ select concat(@a, table_name), @a, table_name
from information_schema.tables where table_schema = 'test';
drop table t1,t2;
-# End of 5.0 tests.
+#
+# Bug#20230: routine_definition is not null
+#
+--disable_warnings
+DROP PROCEDURE IF EXISTS p1;
+DROP FUNCTION IF EXISTS f1;
+--enable_warnings
+
+CREATE PROCEDURE p1() SET @a= 1;
+CREATE FUNCTION f1() RETURNS INT RETURN @a + 1;
+CREATE USER mysql_bug20230@localhost;
+GRANT EXECUTE ON PROCEDURE p1 TO mysql_bug20230@localhost;
+GRANT EXECUTE ON FUNCTION f1 TO mysql_bug20230@localhost;
+
+SELECT ROUTINE_NAME, ROUTINE_DEFINITION FROM INFORMATION_SCHEMA.ROUTINES;
+SHOW CREATE PROCEDURE p1;
+SHOW CREATE FUNCTION f1;
+
+connect (conn1, localhost, mysql_bug20230,,);
+
+SELECT ROUTINE_NAME, ROUTINE_DEFINITION FROM INFORMATION_SCHEMA.ROUTINES;
+SHOW CREATE PROCEDURE p1;
+SHOW CREATE FUNCTION f1;
+CALL p1();
+SELECT f1();
+
+disconnect conn1;
+connection default;
+
+DROP FUNCTION f1;
+DROP PROCEDURE p1;
+DROP USER mysql_bug20230@localhost;
+
+#
+# Bug#18925: subqueries with MIN/MAX functions on INFORMATION_SCHEMA
+#
+
+SELECT t.table_name, c1.column_name
+ FROM information_schema.tables t
+ INNER JOIN
+ information_schema.columns c1
+ ON t.table_schema = c1.table_schema AND
+ t.table_name = c1.table_name
+ WHERE t.table_schema = 'information_schema' AND
+ c1.ordinal_position =
+ ( SELECT COALESCE(MIN(c2.ordinal_position),1)
+ FROM information_schema.columns c2
+ WHERE c2.table_schema = t.table_schema AND
+ c2.table_name = t.table_name AND
+ c2.column_name LIKE '%SCHEMA%'
+ );
+SELECT t.table_name, c1.column_name
+ FROM information_schema.tables t
+ INNER JOIN
+ information_schema.columns c1
+ ON t.table_schema = c1.table_schema AND
+ t.table_name = c1.table_name
+ WHERE t.table_schema = 'information_schema' AND
+ c1.ordinal_position =
+ ( SELECT COALESCE(MIN(c2.ordinal_position),1)
+ FROM information_schema.columns c2
+ WHERE c2.table_schema = 'information_schema' AND
+ c2.table_name = t.table_name AND
+ c2.column_name LIKE '%SCHEMA%'
+ );
+
+#
+# Bug#21231: query with a simple non-correlated subquery over
+# INFORMATION_SCHEMA.TABLES
+#
+
+SELECT MAX(table_name) FROM information_schema.tables;
+SELECT table_name from information_schema.tables
+ WHERE table_name=(SELECT MAX(table_name)
+ FROM information_schema.tables);
+
+--echo End of 5.0 tests.
#
# Show engines
#
@@ -877,3 +953,5 @@ select user,db from information_schema.processlist;
connection default;
drop user user3148@localhost;
+--echo End of 5.1 tests.
+
diff --git a/mysql-test/t/init_connect.test b/mysql-test/t/init_connect.test
index 0ee6387d985..31a98df33df 100644
--- a/mysql-test/t/init_connect.test
+++ b/mysql-test/t/init_connect.test
@@ -35,4 +35,205 @@ select @a;
connection con0;
drop table t1;
-# End of 4.1 tests
+disconnect con1;
+disconnect con2;
+disconnect con3;
+disconnect con4;
+disconnect con5;
+
+--echo End of 4.1 tests
+#
+# Test 5.* features
+#
+
+create table t1 (x int);
+insert into t1 values (3), (5), (7);
+create table t2 (y int);
+
+create user mysqltest1@localhost;
+grant all privileges on test.* to mysqltest1@localhost;
+#
+# Create a simple procedure
+#
+set global init_connect="create procedure p1() select * from t1";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+call p1();
+drop procedure p1;
+
+connection con0;
+disconnect con1;
+#
+# Create a multi-result set procedure
+#
+set global init_connect="create procedure p1(x int)\
+begin\
+ select count(*) from t1;\
+ select * from t1;\
+ set @x = x;
+end";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+call p1(42);
+select @x;
+
+connection con0;
+disconnect con1;
+#
+# Just call it - this will not generate any output
+#
+set global init_connect="call p1(4711)";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+select @x;
+
+connection con0;
+disconnect con1;
+#
+# Drop the procedure
+#
+set global init_connect="drop procedure if exists p1";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+--error ER_SP_DOES_NOT_EXIST
+call p1();
+
+connection con0;
+disconnect con1;
+#
+# Execution of a more complex procedure
+#
+delimiter |;
+create procedure p1(out sum int)
+begin
+ declare n int default 0;
+ declare c cursor for select * from t1;
+ declare exit handler for not found
+ begin
+ close c;
+ set sum = n;
+ end;
+
+ open c;
+ loop
+ begin
+ declare x int;
+
+ fetch c into x;
+ if x > 3 then
+ set n = n + x;
+ end if;
+ end;
+ end loop;
+end|
+delimiter ;|
+# Call the procedure with a cursor
+set global init_connect="call p1(@sum)";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+select @sum;
+
+connection con0;
+disconnect con1;
+drop procedure p1;
+#
+# Test Dynamic SQL
+#
+delimiter |;
+create procedure p1(tbl char(10), v int)
+begin
+ set @s = concat('insert into ', tbl, ' values (?)');
+ set @v = v;
+ prepare stmt1 from @s;
+ execute stmt1 using @v;
+ deallocate prepare stmt1;
+end|
+delimiter ;|
+# Call the procedure with prepared statements
+set global init_connect="call p1('t1', 11)";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+select * from t1;
+
+connection con0;
+disconnect con1;
+drop procedure p1;
+#
+# Stored functions
+#
+delimiter |;
+create function f1() returns int
+begin
+ declare n int;
+
+ select count(*) into n from t1;
+ return n;
+end|
+delimiter ;|
+# Invoke a function
+set global init_connect="set @x = f1()";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+select @x;
+
+connection con0;
+disconnect con1;
+#
+# Create a view
+#
+set global init_connect="create view v1 as select f1()";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+select * from v1;
+
+connection con0;
+disconnect con1;
+#
+# Drop the view
+#
+set global init_connect="drop view v1";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+--error ER_NO_SUCH_TABLE
+select * from v1;
+
+connection con0;
+disconnect con1;
+drop function f1;
+
+# We can't test "create trigger", since this requires super privileges
+# in 5.0, but with super privileges, init_connect is not executed.
+# (However, this can be tested in 5.1)
+#
+#set global init_connect="create trigger trg1\
+# after insert on t2\
+# for each row\
+# insert into t1 values (new.y)";
+#connect (con1,localhost,mysqltest1,,);
+#connection con1;
+#insert into t2 values (2), (4);
+#select * from t1;
+#
+#connection con0;
+#disconnect con1;
+
+create trigger trg1
+ after insert on t2
+ for each row
+ insert into t1 values (new.y);
+
+# Invoke trigger
+set global init_connect="insert into t2 values (13), (17), (19)";
+connect (con1,localhost,mysqltest1,,);
+connection con1;
+select * from t1;
+
+connection con0;
+disconnect con1;
+
+drop trigger trg1;
+set global init_connect=default;
+
+revoke all privileges, grant option from mysqltest1@localhost;
+drop user mysqltest1@localhost;
+drop table t1, t2;
diff --git a/mysql-test/t/init_file.test b/mysql-test/t/init_file.test
index 8b4b788777b..6b5e032fd99 100644
--- a/mysql-test/t/init_file.test
+++ b/mysql-test/t/init_file.test
@@ -6,5 +6,15 @@
# mysql-test/t/init_file-master.opt for the actual test
#
-# End of 4.1 tests
-echo ok;
+--echo ok
+--echo end of 4.1 tests
+#
+# Check 5.x features
+#
+# Expected:
+# 3, 5, 7, 11, 13
+select * from t1;
+# Expected:
+# 30, 3, 11, 13
+select * from t2;
+drop table t1, t2;
diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test
index 5b1b374e487..d00c5499ef9 100644
--- a/mysql-test/t/innodb.test
+++ b/mysql-test/t/innodb.test
@@ -1079,7 +1079,7 @@ drop table t1,t2,t3;
#
create table t1 (id int, name char(10) not null, name2 char(10) not null) engine=innodb;
insert into t1 values(1,'first','fff'),(2,'second','sss'),(3,'third','ttt');
-select name2 from t1 union all select name from t1 union all select id from t1;
+select trim(name2) from t1 union all select trim(name) from t1 union all select trim(id) from t1;
drop table t1;
#
diff --git a/mysql-test/t/innodb_mysql.test b/mysql-test/t/innodb_mysql.test
index 65e2cc658f5..2be53b58a39 100644
--- a/mysql-test/t/innodb_mysql.test
+++ b/mysql-test/t/innodb_mysql.test
@@ -67,6 +67,38 @@ where
drop table t1, t2;
#
+# Bug#17212: results not sorted correctly by ORDER BY when using index
+# (repeatable only w/innodb because of index props)
+#
+CREATE TABLE t1 (a int, b int, KEY b (b)) Engine=InnoDB;
+CREATE TABLE t2 (a int, b int, PRIMARY KEY (a,b)) Engine=InnoDB;
+CREATE TABLE t3 (a int, b int, c int, PRIMARY KEY (a),
+ UNIQUE KEY b (b,c), KEY a (a,b,c)) Engine=InnoDB;
+
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 SELECT a + 1, b + 1 FROM t1;
+INSERT INTO t1 SELECT a + 2, b + 2 FROM t1;
+
+INSERT INTO t2 VALUES (1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(1,7),(1,8);
+INSERT INTO t2 SELECT a + 1, b FROM t2;
+DELETE FROM t2 WHERE a = 1 AND b < 2;
+
+INSERT INTO t3 VALUES (1,1,1),(2,1,2);
+INSERT INTO t3 SELECT a + 2, a + 2, 3 FROM t3;
+INSERT INTO t3 SELECT a + 4, a + 4, 3 FROM t3;
+
+# demonstrate a problem when a must-use-sort table flag
+# (sort_by_table=1) is being neglected.
+SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE
+ t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2)
+ ORDER BY t1.b LIMIT 2;
+
+# demonstrate the problem described in the bug report
+SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE
+ t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2)
+ ORDER BY t1.b LIMIT 5;
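+# (Illustrative sketch, not part of the original patch: the join order and any
+# filesort chosen for the ORDER BY can be inspected with EXPLAIN, e.g.:)
+EXPLAIN SELECT STRAIGHT_JOIN SQL_NO_CACHE t1.b, t1.a FROM t1, t3, t2 WHERE
+  t3.a = t2.a AND t2.b = t1.a AND t3.b = 1 AND t3.c IN (1, 2)
+  ORDER BY t1.b LIMIT 5;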
+DROP TABLE t1, t2, t3;
+#
# Bug #12882 min/max inconsistent on empty table
#
@@ -280,3 +312,11 @@ TRUNCATE table t2;
INSERT INTO t2 select * from t1;
SELECT * from t2;
drop table t1,t2;
+
+#
+# Bug#17530: Incorrect key truncation on table creation caused server crash.
+#
+create table t1(f1 varchar(800) binary not null, key(f1)) engine = innodb
+ character set utf8 collate utf8_general_ci;
+insert into t1 values('aaa');
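+# (Illustrative only, not part of the original patch: the resulting key definition
+# can be inspected with SHOW CREATE TABLE to see how the long utf8 key was created.)
+show create table t1;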
+drop table t1;
diff --git a/mysql-test/t/insert.test b/mysql-test/t/insert.test
index 3711e2986ed..0cc25469705 100644
--- a/mysql-test/t/insert.test
+++ b/mysql-test/t/insert.test
@@ -234,3 +234,10 @@ select row_count();
insert into t1 values (5, 5) on duplicate key update data= data + 10;
select row_count();
drop table t1;
+
+# Test of INSERT IGNORE and re-using auto_increment values
+create table t1 (id int primary key auto_increment, data int, unique(data));
+insert ignore into t1 values(NULL,100),(NULL,110),(NULL,120);
+insert ignore into t1 values(NULL,10),(NULL,20),(NULL,110),(NULL,120),(NULL,100),(NULL,90);
+insert ignore into t1 values(NULL,130),(NULL,140),(500,110),(550,120),(450,100),(NULL,150);
+select * from t1 order by id;
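+# (Illustrative only, not part of the original change: the AUTO_INCREMENT counter
+# left behind by the ignored rows can be inspected with SHOW CREATE TABLE.)
+show create table t1;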
diff --git a/mysql-test/t/join_outer.test b/mysql-test/t/join_outer.test
index dc4e240750c..3f82219fadb 100644
--- a/mysql-test/t/join_outer.test
+++ b/mysql-test/t/join_outer.test
@@ -761,24 +761,10 @@ DROP TABLE t1,t2;
#
# Test for bug #17164: ORed FALSE blocked conversion of outer join into join
-#
-
-CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20),
- INDEX (name)) ENGINE=InnoDB;
-CREATE TABLE t2 (id int(11) NOT NULL PRIMARY KEY, fkey int(11),
- FOREIGN KEY (fkey) REFERENCES t2(id)) ENGINE=InnoDB;
-INSERT INTO t1 VALUES (1,'A1'),(2,'A2'),(3,'B');
-INSERT INTO t2 VALUES (1,1),(2,2),(3,2),(4,3),(5,3);
-
-EXPLAIN
-SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
- WHERE t1.name LIKE 'A%';
+#
-EXPLAIN
-SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
- WHERE t1.name LIKE 'A%' OR FALSE;
+# Test case moved to join_outer_innodb
-DROP TABLE t1,t2;
#
# Bug 19396: LEFT OUTER JOIN over views in curly braces
diff --git a/mysql-test/t/join_outer_innodb.test b/mysql-test/t/join_outer_innodb.test
new file mode 100644
index 00000000000..40add7f488f
--- /dev/null
+++ b/mysql-test/t/join_outer_innodb.test
@@ -0,0 +1,26 @@
+#
+# test of left outer join for tests that depend on innodb
+#
+
+--source include/have_innodb.inc
+
+#
+# Test for bug #17164: ORed FALSE blocked conversion of outer join into join
+#
+
+CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20),
+ INDEX (name)) ENGINE=InnoDB;
+CREATE TABLE t2 (id int(11) NOT NULL PRIMARY KEY, fkey int(11),
+ FOREIGN KEY (fkey) REFERENCES t2(id)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,'A1'),(2,'A2'),(3,'B');
+INSERT INTO t2 VALUES (1,1),(2,2),(3,2),(4,3),(5,3);
+
+EXPLAIN
+SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
+ WHERE t1.name LIKE 'A%';
+
+EXPLAIN
+SELECT COUNT(*) FROM t2 LEFT JOIN t1 ON t2.fkey = t1.id
+ WHERE t1.name LIKE 'A%' OR FALSE;
+
+DROP TABLE t1,t2;
diff --git a/mysql-test/t/lock.test b/mysql-test/t/lock.test
index 8300219b3d4..fb5e45433e9 100644
--- a/mysql-test/t/lock.test
+++ b/mysql-test/t/lock.test
@@ -93,3 +93,18 @@ delete t2 from t1,t2 where t1.a=t2.a;
drop table t1,t2;
# End of 4.1 tests
+
+#
+# Bug#18884 "lock table + global read lock = crash"
+# The bug is not repeatable, just add the test case.
+#
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+create table t1 (a int);
+lock table t1 write;
+--error ER_LOCK_OR_ACTIVE_TRANSACTION
+flush tables with read lock;
+unlock tables;
+drop table t1;
+
diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test
index 5bebec49b88..9c9e68f931f 100644
--- a/mysql-test/t/lock_multi.test
+++ b/mysql-test/t/lock_multi.test
@@ -156,6 +156,55 @@ connection locker;
use test;
#
connection default;
+#
+# Test if CREATE TABLE with LOCK TABLE deadlocks.
+#
+connection writer;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+#
+# This waits until t1 is unlocked.
+connection locker;
+send FLUSH TABLES WITH READ LOCK;
+--sleep 1
+#
+# This must not block.
+connection writer;
+CREATE TABLE t2 (c1 int);
+UNLOCK TABLES;
+#
+# This awakes now.
+connection locker;
+reap;
+UNLOCK TABLES;
+#
+connection default;
+DROP TABLE t1, t2;
+#
+# Test if CREATE TABLE SELECT with LOCK TABLE deadlocks.
+#
+connection writer;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+#
+# This waits until t1 is unlocked.
+connection locker;
+send FLUSH TABLES WITH READ LOCK;
+--sleep 1
+#
+# This must not block.
+connection writer;
+--error 1100
+CREATE TABLE t2 AS SELECT * FROM t1;
+UNLOCK TABLES;
+#
+# This awakes now.
+connection locker;
+reap;
+UNLOCK TABLES;
+#
+connection default;
+DROP TABLE t1;
#
# Bug#19815 - CREATE/RENAME/DROP DATABASE can deadlock on a global read lock
@@ -218,32 +267,4 @@ connection locker;
drop table t1;
# End of 5.0 tests
-# Bug#16986 - Deadlock condition with MyISAM tables
-#
-connection locker;
-use mysql;
-LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE;
-FLUSH TABLES;
---sleep 1
-#
-connection reader;
-use mysql;
-#NOTE: This must be a multi-table select, otherwise the deadlock will not occur
-send SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1;
---sleep 1
-#
-connection locker;
-# Make test case independent from earlier grants.
---replace_result "Table is already up to date" "OK"
-OPTIMIZE TABLES columns_priv, db, host, user;
-UNLOCK TABLES;
-#
-connection reader;
-reap;
-use test;
-#
-connection locker;
-use test;
-#
-connection default;
diff --git a/mysql-test/t/log_state.test b/mysql-test/t/log_state.test
index 41fbd068dce..6fc0f3421a7 100644
--- a/mysql-test/t/log_state.test
+++ b/mysql-test/t/log_state.test
@@ -80,7 +80,8 @@ set global general_log_file='';
--replace_column 2 #
show variables like 'general_log_file';
set global general_log= OFF;
-set global general_log_file='/tmp/log.master';
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+eval set global general_log_file='$MYSQLTEST_VARDIR/tmp/log.master';
set global general_log= ON;
create table t1(f1 int);
drop table t1;
diff --git a/mysql-test/t/lowercase_fs_off.test b/mysql-test/t/lowercase_fs_off.test
new file mode 100644
index 00000000000..7f7b573e7ee
--- /dev/null
+++ b/mysql-test/t/lowercase_fs_off.test
@@ -0,0 +1,27 @@
+#
+# Specific tests for case sensitive file systems
+# i.e. lower_case_file_system=OFF
+#
+-- source include/have_case_sensitive_file_system.inc
+
+connect (master,localhost,root,,);
+connection master;
+create database d1;
+grant all on d1.* to 'sample'@'localhost' identified by 'password';
+flush privileges;
+
+connect (sample,localhost,sample,password,d1);
+connection sample;
+select database();
+--error 1044
+create database d2;
+--error 1044
+create database D1;
+disconnect sample;
+
+connection master;
+drop database if exists d1;
+disconnect master;
+connection default;
+
+# End of 4.1 tests
diff --git a/mysql-test/t/merge.test b/mysql-test/t/merge.test
index c211ff2ac29..33282348325 100644
--- a/mysql-test/t/merge.test
+++ b/mysql-test/t/merge.test
@@ -384,16 +384,6 @@ drop table t1, t2, t3;
# End of 4.1 tests
#
-# BUG#10952 - alter table ... lost data without errors and warnings
-#
-drop table if exists t1;
-create table t1 (c char(20)) engine=MyISAM;
-insert into t1 values ("Monty"),("WAX"),("Walrus");
---error 1031
-alter table t1 engine=MERGE;
-drop table t1;
-
-#
# BUG#19648 - Merge table does not work with bit types
#
create table t1 (b bit(1));
diff --git a/mysql-test/t/myisam.test b/mysql-test/t/myisam.test
index 9936b8bfc44..69d7ee51d81 100644
--- a/mysql-test/t/myisam.test
+++ b/mysql-test/t/myisam.test
@@ -718,8 +718,6 @@ UPDATE t1 AS ta1,t1 AS ta2 SET ta1.b='aaaaaa',ta2.b='bbbbbb';
SELECT * FROM t1;
DROP TABLE t1;
-# End of 4.1 tests
-
#
# Test varchar
#
@@ -817,6 +815,42 @@ alter table t1 enable keys;
show keys from t1;
drop table t1;
+#
+# Bug#8706 - temporary table with data directory option fails
+#
+connect (session1,localhost,root,,);
+connect (session2,localhost,root,,);
+
+connection session1;
+disable_query_log;
+eval create temporary table t1 (a int) engine=myisam data directory="$MYSQLTEST_VARDIR/tmp" select 9 a;
+enable_query_log;
+disable_result_log;
+show create table t1;
+enable_result_log;
+
+connection session2;
+disable_query_log;
+eval create temporary table t1 (a int) engine=myisam data directory="$MYSQLTEST_VARDIR/tmp" select 99 a;
+enable_query_log;
+disable_result_log;
+show create table t1;
+enable_result_log;
+
+connection default;
+create table t1 (a int) engine=myisam select 42 a;
+
+connection session1;
+select * from t1;
+disconnect session1;
+connection session2;
+select * from t1;
+disconnect session2;
+connection default;
+select * from t1;
+drop table t1;
+
+--echo End of 4.1 tests
#
# Bug#10056 - PACK_KEYS option take values greater than 1 while creating table
@@ -828,6 +862,8 @@ create table t3 (c1 int) engine=myisam pack_keys=default;
create table t4 (c1 int) engine=myisam pack_keys=2;
drop table t1, t2, t3;
+--echo End of 5.0 tests
+
#
# Test of key_block_size
#
@@ -890,3 +926,5 @@ drop table t1;
create table t1 (a int not null, key key_block_size=1024 (a));
--error 1064
create table t1 (a int not null, key `a` key_block_size=1024 (a));
+
+--echo End of 5.1 tests
diff --git a/mysql-test/t/mysql.test b/mysql-test/t/mysql.test
index ac4c323f51e..98fadcfc75d 100644
--- a/mysql-test/t/mysql.test
+++ b/mysql-test/t/mysql.test
@@ -20,16 +20,16 @@ insert into t1 values(1);
--disable_query_log
# Test delimiter : supplied on the command line
-select "Test delimiter : from command line" as " ";
+select "Test delimiter : from command line" as "_";
--exec $MYSQL test --delimiter=":" -e "select * from t1:"
# Test delimiter :; supplied on the command line
-select "Test delimiter :; from command line" as " ";
+select "Test delimiter :; from command line" as "_";
--exec $MYSQL test --delimiter=":;" -e "select * from t1:;"
# Test 'go' command (vertical output) \G
-select "Test 'go' command(vertical output) \G" as " ";
+select "Test 'go' command(vertical output) \G" as "_";
--exec $MYSQL test -e "select * from t1\G"
# Test 'go' command \g
-select "Test 'go' command \g" as " ";
+select "Test 'go' command \g" as "_";
--exec $MYSQL test -e "select * from t1\g"
--enable_query_log
drop table t1;
diff --git a/mysql-test/t/mysql_client.test b/mysql-test/t/mysql_client.test
new file mode 100644
index 00000000000..e4b6658b631
--- /dev/null
+++ b/mysql-test/t/mysql_client.test
@@ -0,0 +1,29 @@
+# This test should work in embedded server after we fix mysqltest
+-- source include/not_embedded.inc
+
+#
+# Bug #20432: mysql client interprets commands in comments
+#
+
+# if the client sees the 'use' within the comment, the bug is not fixed
+--exec echo "/*" > $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--exec echo "use" >> $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--exec echo "*/" >> $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/bug20432.sql 2>&1
+
+# SQL can have embedded comments => works
+--exec echo "select /*" > $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--exec echo "use" >> $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--exec echo "*/ 1" >> $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/bug20432.sql 2>&1
+
+# client commands on the other hand must be at BOL => error
+--exec echo "/*" > $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--exec echo "xxx" >> $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--exec echo "*/ use" >> $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--error 1
+--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/bug20432.sql 2>&1
+
+# client command recognized, but parameter missing => error
+--exec echo "use" > $MYSQLTEST_VARDIR/tmp/bug20432.sql
+--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/bug20432.sql 2>&1
diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test
index 41b4480e6e0..31a4bf3fbd0 100644
--- a/mysql-test/t/mysqldump.test
+++ b/mysql-test/t/mysqldump.test
@@ -759,7 +759,7 @@ create table t3(a int);
--exec $MYSQL_DUMP --skip-comments --force --no-data test t3 t1 non_existing t2
drop table t1, t2, t3;
---echo End of 4.1 tests
+#
# Bug #13318: Bad result with empty field and --hex-blob
#
create table t1 (a binary(1), b blob);
@@ -1095,7 +1095,7 @@ SET SQL_MODE = @old_sql_mode;
DROP TRIGGER tr1;
DROP TABLE t1;
-#
+--echo End of 4.1 tests
#
# Bug 14871 Invalid view dump output
@@ -1159,6 +1159,26 @@ drop table t1, t2;
#
+# Bug#18462 mysqldump does not dump view structures correctly
+#
+#
+create table t (qty int, price int);
+insert into t values(3, 50);
+insert into t values(5, 51);
+create view v1 as select qty, price, qty*price as value from t;
+create view v2 as select qty from v1;
+--echo mysqldump {
+--exec $MYSQL_DUMP --compact -F --tab $MYSQLTEST_VARDIR/tmp test
+--exec cat $MYSQLTEST_VARDIR/tmp/v1.sql
+--echo } mysqldump {
+--exec cat $MYSQLTEST_VARDIR/tmp/v2.sql
+--echo } mysqldump
+drop view v1;
+drop view v2;
+drop table t;
+
+
+#
# Bug#14857 Reading dump files with single statement stored routines fails.
# fixed by patch for bug#16878
#
@@ -1174,6 +1194,58 @@ show create procedure p;
drop function f;
drop procedure p;
+#
+# Bug #17371 Unable to dump a schema with invalid views
+#
+#
+create table t1 ( id serial );
+create view v1 as select * from t1;
+drop table t1;
+# mysqldump gets 1356 from server, but gives us 2
+--echo mysqldump {
+--error 2
+--exec $MYSQL_DUMP --force -N --compact --skip-comments test
+--echo } mysqldump
+drop view v1;
+
+# BUG#17201 Spurious 'DROP DATABASE' in output,
+# also confusion between tables and views.
+# Example code from Markus Popp
+
+create database mysqldump_test_db;
+use mysqldump_test_db;
+create table t1 (id int);
+create view v1 as select * from t1;
+insert into t1 values (1232131);
+insert into t1 values (4711);
+insert into t1 values (3231);
+insert into t1 values (0815);
+--exec $MYSQL_DUMP --skip-comments --add-drop-database --databases mysqldump_test_db
+drop view v1;
+drop table t1;
+drop database mysqldump_test_db;
+
+# Bug21014 Segmentation fault of mysqldump on view
+
+create database mysqldump_tables;
+use mysqldump_tables;
+create table basetable ( id serial, tag varchar(64) );
+
+create database mysqldump_views;
+use mysqldump_views;
+create view nasishnasifu as select mysqldump_tables.basetable.id from mysqldump_tables.basetable;
+
+--exec $MYSQL_DUMP --skip-comments --databases mysqldump_tables mysqldump_views;
+
+drop view nasishnasifu;
+drop database mysqldump_views;
+drop table mysqldump_tables.basetable;
+drop database mysqldump_tables;
+use test;
+
+--echo End of 5.0 tests
+
+#
# Added for use-thread option
#
create table t1 (a text , b text);
@@ -1269,3 +1341,5 @@ insert into t1 values (0815);
drop view v1;
drop table t1;
drop database mysqldump_test_db;
+
+--echo End of 5.1 tests
diff --git a/mysql-test/t/mysqltest.test b/mysql-test/t/mysqltest.test
index 42065cfae81..86cfd66ae2b 100644
--- a/mysql-test/t/mysqltest.test
+++ b/mysql-test/t/mysqltest.test
@@ -411,8 +411,8 @@ echo - MySQL: The world''s
- source database;
echo - MySQL: The world''s
--- most popular open
--- source database;
+-- most popular
+-- open source database;
echo # MySQL: The
--world''s
@@ -469,8 +469,8 @@ echo $message;
let $message= -- MySQL: The
-- world''s most
--- popular open
--- source database;
+-- popular
+-- open source database;
echo $message;
let $message= # MySQL: The
diff --git a/mysql-test/t/ndb_alter_table.test b/mysql-test/t/ndb_alter_table.test
index 73c612b203f..fde9f1479f8 100644
--- a/mysql-test/t/ndb_alter_table.test
+++ b/mysql-test/t/ndb_alter_table.test
@@ -389,6 +389,7 @@ drop index i1 on t1;
--disable_warnings
--exec $NDB_TOOLS_DIR/ndb_show_tables --p > $MYSQLTEST_VARDIR/master-data/test/tmp.dat
LOAD DATA INFILE 'tmp.dat' INTO TABLE ndb_show_tables;
+--exec rm $MYSQLTEST_VARDIR/master-data/test/tmp.dat || true
--enable_warnings
select 'no_copy' from ndb_show_tables where id = @t1_id and name like '%t1%';
diff --git a/mysql-test/t/ndb_autodiscover3.test b/mysql-test/t/ndb_autodiscover3.test
index ed75c89cdd1..6c321da8f36 100644
--- a/mysql-test/t/ndb_autodiscover3.test
+++ b/mysql-test/t/ndb_autodiscover3.test
@@ -2,7 +2,6 @@
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
-
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
@@ -18,7 +17,9 @@ insert into t1 values (1);
--exec $NDB_MGM --no-defaults -e "all restart" >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_waiter --no-defaults >> $NDB_TOOLS_OUTPUT
-
+# Wait for mysqld to reconnect and exit from readonly mode
+# Should preferably be a "while (!"select ndb_readonly")" loop
+sleep 2;
--error 1297
insert into t1 values (2);
--error 1296
@@ -38,7 +39,7 @@ select * from t2 order by a limit 3;
--exec $NDB_TOOLS_DIR/ndb_waiter --no-defaults >> $NDB_TOOLS_OUTPUT
# to ensure mysqld has connected again, and recreated system tables
--exec $NDB_TOOLS_DIR/ndb_desc --no-defaults -r 30 -d cluster apply_status >> $NDB_TOOLS_OUTPUT
-
+sleep 2;
--connection server2
--error ER_NO_SUCH_TABLE
select * from t2;
@@ -57,7 +58,7 @@ reset master;
--exec $NDB_TOOLS_DIR/ndb_waiter --no-defaults >> $NDB_TOOLS_OUTPUT
# to ensure mysqld has connected again, and recreated system tables
--exec $NDB_TOOLS_DIR/ndb_desc --no-defaults -r 30 -d cluster apply_status >> $NDB_TOOLS_OUTPUT
-
+sleep 2;
--connection server1
--error ER_NO_SUCH_TABLE
select * from t2;
diff --git a/mysql-test/t/ndb_binlog_discover.test b/mysql-test/t/ndb_binlog_discover.test
index e74bd3380bd..9c86922b82f 100644
--- a/mysql-test/t/ndb_binlog_discover.test
+++ b/mysql-test/t/ndb_binlog_discover.test
@@ -14,6 +14,20 @@ reset master;
--exec $NDB_TOOLS_DIR/ndb_waiter --no-defaults --not-started > /dev/null
--exec $NDB_MGM --no-defaults -e "all start" > /dev/null
--exec $NDB_TOOLS_DIR/ndb_waiter --no-defaults > /dev/null
-insert into t1 values(1);
+
+--disable_query_log
+let $mysql_errno= 1;
+while ($mysql_errno)
+{
+ # Table t1 is readonly until the mysqld has reconnected properly
+ --error 0,1036
+ insert into t1 values(1);
+ if ($mysql_errno)
+ {
+ --sleep 0.1
+ }
+}
+--enable_query_log
+
--source include/show_binlog_events.inc
drop table t1;
diff --git a/mysql-test/t/ndb_blob_partition.test b/mysql-test/t/ndb_blob_partition.test
index a3948cc9491..6173c9d9851 100644
--- a/mysql-test/t/ndb_blob_partition.test
+++ b/mysql-test/t/ndb_blob_partition.test
@@ -1,6 +1,10 @@
--source include/have_ndb.inc
-- source include/not_embedded.inc
+--disable_query_log
+set new=on;
+--enable_query_log
+
--disable_warnings
drop table if exists t1;
--enable_warnings
diff --git a/mysql-test/t/ndb_cache_multi.test b/mysql-test/t/ndb_cache_multi.test
index beb8e4bc2ac..404393c211e 100644
--- a/mysql-test/t/ndb_cache_multi.test
+++ b/mysql-test/t/ndb_cache_multi.test
@@ -63,3 +63,10 @@ show status like "Qcache_hits";
drop table t1, t2;
+# Turn off query cache on server1
+connection server1;
+set GLOBAL query_cache_size=0;
+
+# Turn off query cache on server2
+connection server2;
+set GLOBAL query_cache_size=0;
diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test
new file mode 100755
index 00000000000..e882ec794c1
--- /dev/null
+++ b/mysql-test/t/ndb_dd_advance.test
@@ -0,0 +1,630 @@
+##############################################################
+# Author: JBM
+# Date: 2006-01-12
+# Purpose: To test using ndb memory and disk tables together.
+##############################################################
+
+##############################################################
+# Author: Nikolay
+# Date: 2006-05-12
+# Purpose: To test using ndb memory and disk tables together.
+#
+# Select from disk into memory table
+# Select from memory into disk table
+# Create test that loads data, use mysql dump to dump data, drop table,
+# create table and load from mysql dump.
+# Use group by asc and desc; Use having; Use order by
+# ALTER Tests (Meta data testing):
+# ALTER from InnoDB to Cluster Disk Data
+# ALTER from MyISAM to Cluster Disk Data
+# ALTER from Cluster Disk Data to InnoDB
+# ALTER from Cluster Disk Data to MyISAM
+# ALTER DD Tables and add columns
+# ALTER DD Tables and add Indexes
+# ALTER DD Tables and drop columns
+#
+##############################################################
+
+-- source include/have_ndb.inc
+-- source include/not_embedded.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+--enable_warnings
+
+############ Test Setup Section #############
+-- echo **** Test Setup Section ****
+
+CREATE LOGFILE GROUP log_group1
+ADD UNDOFILE './log_group1/undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+
+CREATE TABLESPACE table_space1
+ADD DATAFILE './table_space1/datafile.dat'
+USE LOGFILE GROUP log_group1
+INITIAL_SIZE 12M
+ENGINE NDB;
+
+
+CREATE TABLE test.t1
+(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL)
+TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+
+CREATE TABLE test.t2
+(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL)
+ENGINE=NDB;
+
+--echo
+##################### Data load for first test ####################
+--echo **** Data load for first test ****
+
+INSERT INTO test.t1 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45),
+(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50),
+(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55),
+(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60),
+(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65),
+(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70),
+(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75);
+
+
+INSERT INTO test.t2 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45),
+(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50),
+(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55),
+(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60),
+(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65),
+(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70),
+(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75);
+
+--echo
+##################### Test 1 Section Begins ###############
+--echo *** Test 1 Section Begins ***
+SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4);
+SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4);
+SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4);
+SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75);
+SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b;
+--echo
+####################### Test 1 Section End ################
+
+##################### Setup for test 2 ####################
+--echo *** Setup for test 2 ****
+DELETE FROM test.t1;
+INSERT INTO test.t1 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45);
+--echo
+############################# Test Section 2 ###############
+--echo **** Test Section 2 ****
+SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b;
+SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2;
+SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2;
+SHOW CREATE TABLE test.t2;
+SHOW CREATE TABLE test.t1;
+ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+SHOW CREATE TABLE test.t2;
+ALTER TABLE test.t1 ENGINE=NDBCLUSTER;
+SHOW CREATE TABLE test.t1;
+--echo
+######################### End Test Section 2 #################
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+##################### Setup for Test Section 3 ###############
+--echo *** Setup for Test Section 3 ***
+CREATE TABLE test.t1 (
+ usr_id INT unsigned NOT NULL,
+ uniq_id INT unsigned NOT NULL AUTO_INCREMENT,
+ start_num INT unsigned NOT NULL DEFAULT 1,
+ increment INT unsigned NOT NULL DEFAULT 1,
+ PRIMARY KEY (uniq_id),
+ INDEX usr_uniq_idx (usr_id, uniq_id),
+ INDEX uniq_usr_idx (uniq_id, usr_id))
+TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+
+
+CREATE TABLE test.t2 (
+ id INT unsigned NOT NULL DEFAULT 0,
+ usr2_id INT unsigned NOT NULL DEFAULT 0,
+ max INT unsigned NOT NULL DEFAULT 0,
+ c_amount INT unsigned NOT NULL DEFAULT 0,
+ d_max INT unsigned NOT NULL DEFAULT 0,
+ d_num INT unsigned NOT NULL DEFAULT 0,
+ orig_time INT unsigned NOT NULL DEFAULT 0,
+ c_time INT unsigned NOT NULL DEFAULT 0,
+ active ENUM ("no","yes") NOT NULL,
+ PRIMARY KEY (id,usr2_id),
+ INDEX id_idx (id),
+ INDEX usr2_idx (usr2_id))
+ENGINE=NDB;
+
+INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198);
+
+--echo
+###################### Test Section 3 ######################
+--echo **** Test Section 3 ****
+SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment,
+test.t2.usr2_id,test.t2.c_amount,test.t2.max
+FROM test.t1
+LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id
+WHERE test.t1.uniq_id = 4
+ORDER BY test.t2.c_amount;
+
+INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes');
+INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes');
+INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes');
+
+SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment,
+test.t2.usr2_id,test.t2.c_amount,test.t2.max
+FROM test.t1
+LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id
+WHERE test.t1.uniq_id = 4
+ORDER BY test.t2.c_amount;
+--echo
+####################### End Section 3 #########################
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+ALTER TABLESPACE table_space1
+DROP DATAFILE './table_space1/datafile.dat'
+ENGINE = NDB;
+
+DROP TABLESPACE table_space1
+ENGINE = NDB;
+
+DROP LOGFILE GROUP log_group1
+ENGINE =NDB;
+
+####################### Section 4 #########################
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLESPACE ts2
+ ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+
+### Select from disk into memory table ###
+
+ CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int)
+ ENGINE=NDB;
+
+ SHOW CREATE TABLE t1;
+ SHOW CREATE TABLE t2;
+
+ INSERT INTO t1 VALUES (1,1);
+ INSERT INTO t1 VALUES (2,2);
+ SELECT * FROM t1 order by a;
+ INSERT INTO t2(a,b) SELECT * FROM t1;
+ SELECT * FROM t2 order by a;
+
+### Select from memory into disk table ###
+
+ TRUNCATE t1;
+ TRUNCATE t2;
+ INSERT INTO t2 VALUES (3,3);
+ INSERT INTO t2 VALUES (4,4);
+ INSERT INTO t1(a,b) SELECT * FROM t2;
+ SELECT * FROM t1 order by a;
+
+ DROP TABLE t1, t2;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+
+ DROP TABLESPACE ts1 ENGINE NDB;
+
+ ALTER TABLESPACE ts2
+ DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+
+ DROP TABLESPACE ts2 ENGINE NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create test that loads data, use mysql dump to dump data, drop table,
+#### create table and load from mysql dump.
+
+# DROP DATABASE IF EXISTS test;
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts
+ ADD DATAFILE './datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+# CREATE DATABASE test;
+
+CREATE TABLE test.t (
+ a smallint NOT NULL,
+ b int NOT NULL,
+ c bigint NOT NULL,
+ d char(10),
+ e TEXT,
+ f VARCHAR(255),
+ PRIMARY KEY(a)
+) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+ ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f);
+ SHOW CREATE TABLE test.t;
+
+# insert records into tables
+
+ let $1=100;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1");
+ dec $1;
+ }
+ enable_query_log;
+
+ SELECT * FROM test.t order by a;
+--exec $MYSQL_DUMP --skip-comments --databases test > $MYSQLTEST_VARDIR/tmp/t_dump.sql
+DROP TABLE test.t;
+--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/t_dump.sql
+USE test;
+show tables;
+
+SELECT * FROM test.t order by a;
+
+ DROP TABLE test.t;
+# DROP DATABASE test;
+
+ ALTER TABLESPACE ts
+ DROP DATAFILE './datafile.dat'
+ ENGINE NDB;
+
+ DROP TABLESPACE ts ENGINE NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### BUG 18856 test case commented out
+##### Use "SELECT * INTO OUTFILE" to dump data and "LOAD DATA INFILE" to load ##### data back to the data file.
+
+# CREATE LOGFILE GROUP lg
+# ADD UNDOFILE './undofile.dat'
+# INITIAL_SIZE 16M
+# UNDO_BUFFER_SIZE = 1M
+# ENGINE=NDB;
+
+# CREATE TABLESPACE ts
+# ADD DATAFILE './datafile.dat'
+# USE LOGFILE GROUP lg
+# INITIAL_SIZE 12M
+# ENGINE NDB;
+
+#CREATE DATABASE test;
+
+#CREATE TABLE test.t (
+# a smallint NOT NULL,
+# b int NOT NULL,
+# c bigint NOT NULL,
+# d char(10),
+# e TEXT,
+# f VARCHAR(255),
+# PRIMARY KEY(a)
+#) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+# ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f);
+# SHOW CREATE TABLE test.t;
+
+# insert records into tables
+
+# let $1=100;
+# disable_query_log;
+# while ($1)
+# {
+# eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1");
+# dec $1;
+# }
+# enable_query_log;
+
+# SELECT * FROM test.t order by a;
+
+# SELECT * INTO OUTFILE 't_backup' FROM test.t;
+# TRUNCATE test.t;
+
+# 'TRUNCATE test.t' failed: 1205: Lock wait timeout exceeded; try restarting
+# transaction. TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+# SELECT count(*) FROM test.t;
+# LOAD DATA INFILE 't_backup' INTO TABLE test.t;
+
+# SELECT * FROM test.t order by a;
+
+# DROP TABLE test.t;
+# DROP DATABASE test;
+
+# ALTER TABLESPACE ts
+# DROP DATAFILE './datafile.dat'
+# ENGINE NDB;
+# DROP TABLESPACE ts ENGINE NDB;
+# DROP LOGFILE GROUP lg
+# ENGINE=NDB;
+
+#### Use group by asc and desc; Use having; Use order by. ####
+
+# DROP DATABASE IF EXISTS test;
+ DROP table IF EXISTS test.t1;
+ DROP table IF EXISTS test.t2;
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLESPACE ts2
+ ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+# CREATE DATABASE test;
+
+CREATE TABLE test.t1 (
+ a1 smallint NOT NULL,
+ a2 int NOT NULL,
+ a3 bigint NOT NULL,
+ a4 char(10),
+ a5 decimal(5,1),
+ a6 time,
+ a7 date,
+ a8 datetime,
+ a9 VARCHAR(255),
+ a10 blob,
+ PRIMARY KEY(a1)
+) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+
+ ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8);
+ SHOW CREATE TABLE test.t1;
+
+CREATE TABLE test.t2 (
+ b1 smallint NOT NULL,
+ b2 int NOT NULL,
+ b3 bigint NOT NULL,
+ b4 char(10),
+ b5 decimal(5,1),
+ b6 time,
+ b7 date,
+ b8 datetime,
+ b9 VARCHAR(255),
+ b10 blob,
+ PRIMARY KEY(b1)
+) ENGINE=NDB;
+
+ ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8);
+ SHOW CREATE TABLE test.t2;
+
+let $1=20;
+disable_query_log;
+while ($1)
+{
+ eval insert into test.t1 values($1, $1+1, $1+2000000000, "aaa$1", 34.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data");
+ eval insert into test.t2 values($1+2, $1+3, $1+3000000000, "aaa$1", 35.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data");
+ dec $1;
+}
+enable_query_log;
+
+SELECT * FROM test.t1 order by a1;
+SELECT * FROM test.t2 order by b1;
+SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1;
+SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2;
+
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+
+create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+
+insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4);
+select distinct a from test.t1 group by b,a having a > 2 order by a desc;
+select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc;
+select distinct a from test.t1 group by b,a having a > 2 order by a asc;
+select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc;
+drop table test.t1;
+
+create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3');
+select * from test.t1 where a >= '1' order by a;
+select distinct a from test.t1 order by a desc;
+select distinct a from test.t1 where a >= '1' order by a desc;
+select distinct a from test.t1 where a >= '1' order by a asc;
+drop table test.t1;
+
+CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB;
+
+INSERT INTO test.t1 (email, infoID, dateentered) VALUES
+ ('test1@testdomain.com', 1, '2002-07-30 22:56:38'),
+ ('test1@testdomain.com', 1, '2002-07-27 22:58:16'),
+ ('test2@testdomain.com', 1, '2002-06-19 15:22:19'),
+ ('test2@testdomain.com', 2, '2002-06-18 14:23:47'),
+ ('test3@testdomain.com', 1, '2002-05-19 22:17:32');
+
+INSERT INTO test.t2(infoID, shipcode) VALUES
+ (1, 'Z001'),
+ (2, 'R002');
+
+SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode;
+SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC;
+SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC;
+drop table test.t1,test.t2;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+ DROP TABLESPACE ts1 ENGINE NDB;
+ ALTER TABLESPACE ts2
+ DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+ DROP TABLESPACE ts2 ENGINE NDB;
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+####################################################################
+
+
+#### Customer-posted ORDER BY test case
+
+DROP TABLE IF EXISTS test.t;
+create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB;
+insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1);
+insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2);
+select * from test.t order by f1;
+select f1,f2 from test.t order by f2;
+select f2 from test.t order by f2;
+select f1,f2 from test.t order by f1;
+drop table test.t;
+
+################## ALTER Tests (Meta data testing) ####################
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts
+ ADD DATAFILE './table_space/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+#### Try to ALTER from InnoDB to Cluster Disk Data
+
+CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB;
+SHOW CREATE TABLE test.t1;
+ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+DROP TABLE test.t1;
+
+#### Try to ALTER from MyISAM to Cluster Disk Data
+
+CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM;
+SHOW CREATE TABLE test.t1;
+ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+DROP TABLE test.t1;
+
+#### Try to ALTER from Cluster Disk Data to InnoDB
+
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+ALTER TABLE test.t1 ENGINE=InnoDB;
+SHOW CREATE TABLE test.t1;
+DROP TABLE test.t1;
+
+#### Try to ALTER from Cluster Disk Data to MyISAM
+
+CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+ALTER TABLE test.t1 ENGINE=MyISAM;
+SHOW CREATE TABLE test.t1;
+DROP TABLE test.t1;
+
+#### Try to ALTER DD Tables and add columns
+
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB;
+
+SHOW CREATE TABLE test.t1;
+
+#### Try to ALTER DD Tables and add Indexes
+
+ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6),
+ADD INDEX (a7), ADD INDEX (a8);
+
+SHOW CREATE TABLE test.t1;
+
+DROP TABLE test.t1;
+
+#### Try to ALTER DD Tables and drop columns
+
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB;
+
+SHOW CREATE TABLE test.t1;
+
+ALTER TABLE test.t1 DROP a14;
+ALTER TABLE test.t1 DROP a13;
+ALTER TABLE test.t1 DROP a12;
+ALTER TABLE test.t1 DROP a11;
+ALTER TABLE test.t1 DROP a10;
+ALTER TABLE test.t1 DROP a9;
+ALTER TABLE test.t1 DROP a8;
+ALTER TABLE test.t1 DROP a7;
+ALTER TABLE test.t1 DROP a6;
+ALTER TABLE test.t1 DROP PRIMARY KEY;
+
+SHOW CREATE TABLE test.t1;
+
+DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts
+ DROP DATAFILE './table_space/datafile.dat'
+ ENGINE NDB;
+ DROP TABLESPACE ts ENGINE NDB;
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+####################### End section 4 #########################
+#End 5.1 test case
+
diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test
new file mode 100755
index 00000000000..7b7a15ef01a
--- /dev/null
+++ b/mysql-test/t/ndb_dd_advance2.test
@@ -0,0 +1,724 @@
+##############################################################
+# Author: Nikolay
+# Date: 2006-04-01
+# Purpose: Specific Blob and Varchar testing using disk tables.
+##############################################################
+# Create Stored procedures that use disk based tables.
+# Create functions that operate on disk based tables.
+# Create triggers that operate on disk based tables.
+# Create views that operate on disk based tables.
+# Try to create FK constraints on disk based tables.
+# Create and use a disk based table that uses auto inc.
+# Create test that uses transactions (commit, rollback).
+# Create large disk based table, do random queries, check cache hits, do same
+# query 10 times and check cache hits.
+# Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), ... other
+# built in SQL functions.
+# Create test that uses locks.
+# Create test using truncate.
+##############################################################
+
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+DROP TABLE IF EXISTS test.t3;
+--enable_warnings
+
+#### Copy data from table in one table space to table in different table space. ####
+--echo *****
+--echo **** Copy data from table in one table space to table in different table space
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLESPACE ts2
+ ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts2 STORAGE DISK ENGINE=NDB;
+
+ SHOW CREATE TABLE test.t1;
+ SHOW CREATE TABLE test.t2;
+
+ INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa');
+ INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb');
+ SELECT * FROM test.t1 ORDER BY a1;
+ INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1;
+ SELECT * FROM test.t2 ORDER BY a1;
+
+ DROP TABLE test.t1, test.t2;
+
+ # populate BLOB field with large data
+
+set @vc1 = repeat('a', 200);
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('c', 1000);
+set @vc4 = repeat('d', 4000);
+
+# x0 size 256
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+
+# b1 length 2000+256
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+# d1 length 3000
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+
+# b2 length 20000
+set @b2 = 'b2';
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+# d2 length 30000
+set @d2 = 'dd2';
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+
+select length(@x0),length(@b1),length(@d1) from dual;
+select length(@x0),length(@b2),length(@d2) from dual;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB)
+ TABLESPACE ts2 STORAGE DISK ENGINE=NDB;
+
+ SHOW CREATE TABLE test.t1;
+ SHOW CREATE TABLE test.t2;
+
+ INSERT INTO test.t1 VALUES (1,@vc1,@d1);
+ INSERT INTO test.t1 VALUES (2,@vc2,@b1);
+ INSERT INTO test.t1 VALUES (3,@vc3,@d2);
+ INSERT INTO test.t1 VALUES (4,@vc4,@b2);
+
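+ # Check lengths and sample substrings of the inserted VARCHAR and BLOB values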
+ SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3)
+ FROM test.t1 WHERE a1=1;
+ SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3)
+ FROM test.t1 where a1=2;
+
+ INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1;
+ SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3)
+ FROM test.t2 WHERE a1=1;
+ SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3)
+ FROM test.t2 where a1=2;
+
+
+ DROP TABLE test.t1, test.t2;
+
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+ DROP TABLESPACE ts1 ENGINE NDB;
+
+ ALTER TABLESPACE ts2
+ DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+ DROP TABLESPACE ts2 ENGINE NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Insert, Update, Delete from NDB table with BLOB fields ####
+--echo *****
+--echo **** Insert, Update, Delete from NDB table with BLOB fields
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+set @vc1 = repeat('a', 200);
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('c', 1000);
+set @vc4 = repeat('d', 4000);
+set @vc5 = repeat('d', 5000);
+
+set @bb1 = repeat('1', 2000);
+set @bb2 = repeat('2', 5000);
+set @bb3 = repeat('3', 10000);
+set @bb4 = repeat('4', 40000);
+set @bb5 = repeat('5', 50000);
+
+select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual;
+select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+# CREATE TABLE test.t2 (a1 int NOT NULL, a2 VARCHAR(5000), a3 BLOB)
+# TABLESPACE ts2 STORAGE DISK ENGINE=NDB;
+
+ INSERT INTO test.t1 VALUES (1,@vc1,@bb1);
+ INSERT INTO test.t1 VALUES (2,@vc2,@bb2);
+ INSERT INTO test.t1 VALUES (3,@vc3,@bb3);
+ INSERT INTO test.t1 VALUES (4,@vc4,@bb4);
+ INSERT INTO test.t1 VALUES (5,@vc5,@bb5);
+
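+ # Update each row to a different sized value and verify lengths and substrings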
+ UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1;
+ SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3)
+ FROM test.t1 WHERE a1=1;
+
+ UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2;
+ SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3)
+ FROM test.t1 WHERE a1=2;
+
+ UPDATE test.t1 SET a2=@vc2, a3=@bb2 WHERE a1=3;
+ SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3)
+ FROM test.t1 WHERE a1=3;
+
+ UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4;
+ SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3)
+ FROM test.t1 WHERE a1=4;
+
+ UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5;
+ SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3)
+ FROM test.t1 WHERE a1=5;
+
+ DELETE FROM test.t1 where a1=5;
+ SELECT count(*) from test.t1;
+ DELETE FROM test.t1 where a1=4;
+ SELECT count(*) from test.t1;
+ DELETE FROM test.t1 where a1=3;
+ SELECT count(*) from test.t1;
+ DELETE FROM test.t1 where a1=2;
+ SELECT count(*) from test.t1;
+ DELETE FROM test.t1 where a1=1;
+ SELECT count(*) from test.t1;
+
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+ DROP TABLESPACE ts1 ENGINE NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+##### Create Stored procedures that use disk based tables #####
+--echo *****
+--echo **** Create Stored procedures that use disk based tables
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+delimiter //;
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB//
+ CREATE PROCEDURE test.sp1()
+ BEGIN
+ INSERT INTO test.t1 values (1,'111111','aaaaaaaa');
+ END//
+delimiter ;//
+
+ CALL test.sp1();
+ SELECT * FROM test.t1;
+
+delimiter //;
+ CREATE PROCEDURE test.sp2(n INT, vc VARCHAR(256), blb BLOB)
+ BEGIN
+ UPDATE test.t1 SET a2=vc, a3=blb where a1=n;
+ END//
+delimiter ;//
+
+ CALL test.sp2(1,'222222','bbbbbbbb');
+ SELECT * FROM test.t1;
+
+ DELETE FROM test.t1;
+ DROP PROCEDURE test.sp1;
+ DROP PROCEDURE test.sp2;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create function that operate on disk based tables ####
+--echo *****
+--echo ***** Create function that operate on disk based tables
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ let $1=100;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1");
+ dec $1;
+ }
+ enable_query_log;
+
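+ # fn1 looks up a row by primary key; fn2 updates the BLOB column and returns the new value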
+ delimiter //;
+ CREATE FUNCTION test.fn1(n INT) RETURNS INT
+ BEGIN
+ DECLARE v INT;
+ SELECT a1 INTO v FROM test.t1 WHERE a1=n;
+ RETURN v;
+ END//
+ delimiter ;//
+
+delimiter //;
+ CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB
+ BEGIN
+ DECLARE vv BLOB;
+ UPDATE test.t1 SET a3=blb where a1=n;
+ SELECT a3 INTO vv FROM test.t1 WHERE a1=n;
+ RETURN vv;
+ END//
+ delimiter ;//
+
+ SELECT test.fn1(10) FROM DUAL;
+ SELECT test.fn2(50, 'new BLOB content') FROM DUAL;
+
+ DELETE FROM test.t1;
+ DROP FUNCTION test.fn1;
+ DROP FUNCTION test.fn2;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create triggers that operate on disk based tables ####
+--echo *****
+--echo ***** Create triggers that operate on disk based tables
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+
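+ # The trigger fills a2 and a3 with default text when they are NULL on insert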
+ delimiter //;
+ CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW
+ BEGIN
+ if isnull(new.a2) then
+ set new.a2:= 'trg1 works on a2 field';
+ end if;
+ if isnull(new.a3) then
+ set new.a3:= 'trg1 works on a3 field';
+ end if;
+ end//
+ insert into test.t1 (a1) values (1)//
+ insert into test.t1 (a1,a2) values (2, 'ccccccc')//
+ select * from test.t1 order by a1//
+ delimiter ;//
+
+ DELETE FROM test.t1;
+ DROP TRIGGER test.trg1;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create, update views that operate on disk based tables ####
+--echo *****
+--echo ***** Create, update views that operate on disk based tables
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ let $1=10;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1");
+ dec $1;
+ }
+ enable_query_log;
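+ # Create a view over the disk based table, then read and update through it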
+ CREATE VIEW test.v1 AS SELECT * FROM test.t1;
+ SELECT * FROM test.v1 order by a1;
+ CHECK TABLE test.v1, test.t1;
+
+ UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5;
+ SELECT * FROM test.v1 order by a1;
+
+ DROP VIEW test.v1;
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create and use disk based table that use auto inc ####
+--echo *****
+--echo ***** Create and use disk based table that use auto inc
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ let $1=10;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t1 values(NULL, "aaaaa$1", "bbbbb$1");
+ dec $1;
+ }
+ enable_query_log;
+ SELECT * FROM test.t1 ORDER BY a1;
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create test that use transaction (commit, rollback) ####
+--echo *****
+--echo ***** Create test that use transaction (commit, rollback)
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ SET AUTOCOMMIT=0;
+ CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+
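+ # With autocommit off, the first insert is committed and the second rolled back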
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+ COMMIT;
+ SELECT * FROM test.t1 ORDER BY a1;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ ROLLBACK;
+ SELECT * FROM test.t1 ORDER BY a1;
+
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+ SET AUTOCOMMIT=1;
+
+# Now do the same thing with START TRANSACTION without using AUTOCOMMIT.
+
+ CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+
+ START TRANSACTION;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+ COMMIT;
+ SELECT * FROM test.t1 ORDER BY a1;
+
+ START TRANSACTION;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ ROLLBACK;
+ SELECT * FROM test.t1 ORDER BY a1;
+
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create test that uses locks ####
+--echo *****
+--echo ***** Create test that uses locks
+--echo *****
+
+ connect (con1,localhost,root,,);
+ connect (con2,localhost,root,,);
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+# connection con1;
+--disable_warnings
+ drop table if exists test.t1;
+ CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+--enable_warnings
+
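+ # con1 holds a write lock on t1 while con2 also selects from and inserts into it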
+ LOCK TABLES test.t1 write;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ SELECT * FROM test.t1 ORDER BY a1;
+
+ connection con2;
+ SELECT * FROM test.t1 ORDER BY a1;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
+
+ connection con1;
+ UNLOCK TABLES;
+
+ connection con2;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
+ SELECT * FROM test.t1 ORDER BY a1;
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+
+ #connection default;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create large disk base table, do random queries, check cache hits ####
+--echo *****
+--echo ***** Create large disk base table, do random queries, check cache hits
+--echo *****
+
+set @vc1 = repeat('a', 200);
+SELECT @vc1 FROM DUAL;
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('b', 998);
+
+# x0 size 256
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+
+# b1 length 2000+256 (blob part aligned)
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+# d1 length 3000
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+
+# b2 length 20000
+set @b2 = 'b2';
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+# d2 length 30000
+set @d2 = 'dd2';
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+
+select length(@x0),length(@b1),length(@d1) from dual;
+select length(@x0),length(@b2),length(@d2) from dual;
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+
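+ # Insert two rows with different sized values, swap them with updates, and re-check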
+ INSERT INTO test.t1 values(1,@vc1,@d1);
+ INSERT INTO test.t1 values(2,@vc2,@d2);
+ explain SELECT * from test.t1 WHERE a1 = 1;
+
+ SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3)
+ FROM test.t1 WHERE a1=1 ORDER BY a1;
+ SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3)
+ FROM test.t1 where a1=2 ORDER BY a1;
+
+ UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1;
+ UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2;
+
+ SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3)
+ FROM test.t1 where a1=1;
+ SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3)
+ FROM test.t1 where a1=2;
+
+ #SHOW VARIABLES LIKE 'have_query_cache';
+ #SHOW STATUS LIKE 'Qcache%';
+
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE ####
+--echo *****
+--echo ***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250))
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ let $1=100;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t1 values($1, "aaaaaaaaaaaaaaaa$1", "bbbbbbbbbbbbbbbbbb$1", '2006-06-20' , USER());
+ dec $1;
+ }
+ enable_query_log;
+
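+ # Aggregate over the 100 inserted rows and check the stored USER() value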
+ SELECT COUNT(*) from test.t1;
+ SELECT SUM(a1) from test.t1;
+ SELECT MIN(a1) from test.t1;
+ SELECT MAX(a1) from test.t1;
+ SELECT a5 from test.t1 where a1=50;
+
+
+ SELECT * from test.t1 order by a1;
+
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+
+#End 5.1 test case
+
diff --git a/mysql-test/t/ndb_dd_backuprestore.test b/mysql-test/t/ndb_dd_backuprestore.test
index be6d73e27b4..48db8ec3e0b 100644
--- a/mysql-test/t/ndb_dd_backuprestore.test
+++ b/mysql-test/t/ndb_dd_backuprestore.test
@@ -5,6 +5,12 @@
########################################
-- source include/have_ndb.inc
+-- source include/ndb_default_cluster.inc
+-- source include/not_embedded.inc
+
+--disable_query_log
+set new=on;
+--enable_query_log
--disable_warnings
DROP TABLE IF EXISTS test.t1;
diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test
index d2dc0561955..36018e6c679 100644
--- a/mysql-test/t/ndb_multi.test
+++ b/mysql-test/t/ndb_multi.test
@@ -3,7 +3,6 @@
-- source include/not_embedded.inc
-- source include/have_binlog_format_statement.inc
-
--disable_warnings
connection server2;
drop table if exists t1, t2, t3, t4;
diff --git a/mysql-test/t/ndb_partition_error.test b/mysql-test/t/ndb_partition_error.test
index 06581f1270f..9db2a6a6f6d 100644
--- a/mysql-test/t/ndb_partition_error.test
+++ b/mysql-test/t/ndb_partition_error.test
@@ -10,6 +10,9 @@
drop table if exists t1;
--enable_warnings
+--disable_query_log
+set new=on;
+--enable_query_log
#
# Partition by range, generate node group error
#
diff --git a/mysql-test/t/ndb_partition_key.test b/mysql-test/t/ndb_partition_key.test
index fb0581eb6f6..d8c1b61b94f 100644
--- a/mysql-test/t/ndb_partition_key.test
+++ b/mysql-test/t/ndb_partition_key.test
@@ -192,7 +192,10 @@ CREATE TABLE t1 (
c3 INT NOT NULL,
PRIMARY KEY(c1,c3))
ENGINE=NDB
- PARTITION BY KEY(c3);
+ PARTITION BY KEY(c3)
+ (PARTITION p0 NODEGROUP 0, PARTITION p1 NODEGROUP 0);
ALTER TABLE t1 ADD COLUMN c4 INT AFTER c1;
+SELECT NODEGROUP,PARTITION_NAME FROM information_schema.partitions WHERE
+table_name = "t1";
DROP TABLE t1;
diff --git a/mysql-test/t/ndb_partition_list.test b/mysql-test/t/ndb_partition_list.test
index 2ad37b8768c..ccfcdbc84f4 100644
--- a/mysql-test/t/ndb_partition_list.test
+++ b/mysql-test/t/ndb_partition_list.test
@@ -5,6 +5,10 @@
#
#-- source include/have_partition.inc
+--disable_query_log
+set new=on;
+--enable_query_log
+
--disable_warnings
drop table if exists t1;
--enable_warnings
diff --git a/mysql-test/t/ndb_partition_range.test b/mysql-test/t/ndb_partition_range.test
index 981467d4055..7952ba502d2 100644
--- a/mysql-test/t/ndb_partition_range.test
+++ b/mysql-test/t/ndb_partition_range.test
@@ -6,6 +6,10 @@
#
#-- source include/have_partition.inc
+--disable_query_log
+set new=on;
+--enable_query_log
+
--disable_warnings
drop table if exists t1;
--enable_warnings
diff --git a/mysql-test/t/ndb_replace.test b/mysql-test/t/ndb_replace.test
index 94a11f7dfb2..476a607ed44 100644
--- a/mysql-test/t/ndb_replace.test
+++ b/mysql-test/t/ndb_replace.test
@@ -39,6 +39,7 @@ INSERT INTO t1 VALUES (1,1,23),(2,2,24);
REPLACE INTO t1 (j,k) VALUES (1,42);
REPLACE INTO t1 (i,j) VALUES (17,2);
SELECT * from t1 ORDER BY i;
+DROP TABLE t1;
# bug#19906
CREATE TABLE t2 (a INT(11) NOT NULL,
@@ -64,4 +65,40 @@ SELECT * FROM t2 ORDER BY id;
DROP TABLE t2;
+#
+# Bug #20728 "REPLACE does not work correctly for NDB table with PK and
+# unique index"
+#
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster;
+# Test for plain replace which updates pk
+insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
+replace into t1 (pk, apk) values (4, 1), (5, 2);
+select * from t1 order by pk;
+delete from t1;
+# Another test for plain replace which doesn't touch pk
+insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
+replace into t1 (pk, apk) values (1, 4), (2, 5);
+select * from t1 order by pk;
+delete from t1;
+# Test for load data replace which updates pk
+insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6);
+load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
+select * from t1 order by pk;
+delete from t1;
+# Now test for load data replace which doesn't touch pk
+insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5);
+load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
+select * from t1 order by pk;
+delete from t1;
+# Finally test for both types of replace ... select
+insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
+replace into t1 (pk, apk) select 4, 1;
+replace into t1 (pk, apk) select 2, 4;
+select * from t1 order by pk;
+# Clean-up
+drop table t1;
+--echo End of 5.0 tests.
diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test
index f11324492c2..9030dfbe304 100644
--- a/mysql-test/t/ndb_restore.test
+++ b/mysql-test/t/ndb_restore.test
@@ -4,8 +4,8 @@
--disable_warnings
use test;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
+drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
--enable_warnings
CREATE TABLE `t1_c` (
@@ -132,6 +132,13 @@ CREATE TABLE `t9_c` (
) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3);
+# Bug #20820
+# auto inc table not handled correctly when restored from cluster backup
+# - before fix ndb_restore would not set auto inc value correctly,
+# seen by select below
+CREATE TABLE t10_c (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO t10_c VALUES (1),(2),(3);
+
create table t1 engine=myisam as select * from t1_c;
create table t2 engine=myisam as select * from t2_c;
create table t3 engine=myisam as select * from t3_c;
@@ -141,10 +148,11 @@ create table t6 engine=myisam as select * from t6_c;
create table t7 engine=myisam as select * from t7_c;
create table t8 engine=myisam as select * from t8_c;
create table t9 engine=myisam as select * from t9_c;
+create table t10 engine=myisam as select * from t10_c;
--source include/ndb_backup.inc
-drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
@@ -205,33 +213,17 @@ select count(*)
from (select * from t9 union
select * from t9_c) a;
+# Bug #20820 cont'd
+select * from t10_c order by a;
+
#
# Try Partitioned tables as well
#
-ALTER TABLE t1_c
-PARTITION BY RANGE (`capgoaledatta`)
-(PARTITION p0 VALUES LESS THAN MAXVALUE);
-
-ALTER TABLE t2_c
-PARTITION BY LIST(`capgotod`)
-(PARTITION p0 VALUES IN (0,1,2,3,4,5,6));
-
-ALTER TABLE t3_c
-PARTITION BY HASH (`CapGoaledatta`);
-
-ALTER TABLE t5_c
-PARTITION BY HASH (`capfa`)
-PARTITIONS 4;
-
-ALTER TABLE t6_c
-PARTITION BY LINEAR HASH (`relatta`)
-PARTITIONS 4;
-
ALTER TABLE t7_c
PARTITION BY LINEAR KEY (`dardtestard`);
--source include/ndb_backup.inc
-drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
@@ -289,7 +281,7 @@ select count(*)
from (select * from t9 union
select * from t9_c) a;
-drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,0)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
@@ -353,7 +345,7 @@ select count(*)
# guaranteed to be from t2_c, this since order of tables in backup
# is none deterministic
#
-drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
+drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c;
--source include/ndb_backup.inc
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --core=0 -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id 2>&1 | grep Translate || true
@@ -362,7 +354,7 @@ drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
#
--disable_warnings
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
drop table if exists t2_c;
--enable_warnings
@@ -372,4 +364,4 @@ drop table if exists t2_c;
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults -d sys -D , SYSTAB_0 | grep 520093696, | sed "s/,$the_backup_id/,<the_backup_id>/"
-# End of 4.1 tests
+# End of 5.0 tests (4.1 tests intermixed to save test time)
diff --git a/mysql-test/t/ndb_trigger.test b/mysql-test/t/ndb_trigger.test
new file mode 100644
index 00000000000..2521ef17842
--- /dev/null
+++ b/mysql-test/t/ndb_trigger.test
@@ -0,0 +1,92 @@
+# Tests which involve triggers and NDB storage engine
+--source include/have_ndb.inc
+--source include/not_embedded.inc
+
+#
+# Test for bug#18437 "Wrong values inserted with a before update
+# trigger on NDB table". SQL-layer didn't properly inform handler
+# about fields which were read and set in triggers. In some cases
+# this resulted in incorrect (garbage) values of OLD variables and
+# lost changes to NEW variables.
+# You can find similar tests for ON INSERT triggers in federated.test
+# since this engine so far is the only engine in MySQL which cares
+# about field mark-up during handler::write_row() operation.
+#
+
+--disable_warnings
+drop table if exists t1, t2, t3;
+--enable_warnings
+
+create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb;
+create table t2 (op char(1), a int not null, b decimal (63,30));
+create table t3 select 1 as i;
+
+delimiter //;
+create trigger t1_bu before update on t1 for each row
+begin
+ insert into t2 values ("u", old.a, old.b);
+ set new.b = old.b + 10;
+end;//
+create trigger t1_bd before delete on t1 for each row
+begin
+ insert into t2 values ("d", old.a, old.b);
+end;//
+delimiter ;//
+insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05);
+
+# Check that usual update works as it should
+update t1 set a=5 where a != 3;
+select * from t1 order by id;
+select * from t2 order by op, a, b;
+delete from t2;
+# Check that everything works for multi-update
+update t1, t3 set a=6 where a = 5;
+select * from t1 order by id;
+select * from t2 order by op, a, b;
+delete from t2;
+# Check for delete
+delete from t1 where a != 3;
+select * from t1 order by id;
+select * from t2 order by op, a, b;
+delete from t2;
+# Check for multi-delete
+insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05);
+delete t1 from t1, t3 where a != 3;
+select * from t1 order by id;
+select * from t2 order by op, a, b;
+delete from t2;
+# Check for insert ... on duplicate key update
+insert into t1 values (4, 4, 4.05);
+insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1;
+select * from t1 order by id;
+select * from t2 order by op, a, b;
+delete from t2;
+# Check for insert ... select ... on duplicate key update
+delete from t3;
+insert into t3 values (4), (3);
+insert into t1 (id, a) (select i, 1 from t3) on duplicate key update a= a + 1;
+select * from t1 order by id;
+select * from t2 order by op, a, b;
+delete from t2;
+# Check for replace
+replace into t1 (id, a) values (4, 1), (3, 1);
+select * from t1 order by id;
+select * from t2 order by op, a, b;
+delete from t1;
+delete from t2;
+# Check for replace ... select ...
+insert into t1 values (3, 1, 1.05), (4, 1, 2.05);
+replace into t1 (id, a) (select i, 2 from t3);
+select * from t1 order by id;
+select * from t2 order by op, a, b;
+delete from t1;
+delete from t2;
+# Check for load data replace
+insert into t1 values (3, 1, 1.05), (5, 2, 2.05);
+load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a);
+select * from t1 order by id;
+select * from t2 order by op, a, b;
+
+drop tables t1, t2, t3;
+
+--echo End of 5.0 tests
diff --git a/mysql-test/t/odbc.test b/mysql-test/t/odbc.test
index d4b6fc35e74..6a754bb32a7 100644
--- a/mysql-test/t/odbc.test
+++ b/mysql-test/t/odbc.test
@@ -21,4 +21,14 @@ select * from t1 where a is null;
explain select * from t1 where b is null;
drop table t1;
+#
+# Bug #14553: NULL in WHERE resets LAST_INSERT_ID
+#
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY);
+INSERT INTO t1 VALUES (NULL);
+SELECT sql_no_cache a, last_insert_id() FROM t1 WHERE a IS NULL;
+SELECT sql_no_cache a, last_insert_id() FROM t1 WHERE a IS NULL;
+SELECT sql_no_cache a, last_insert_id() FROM t1;
+DROP TABLE t1;
+
# End of 4.1 tests
diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test
index 7ef5f66cd9e..d4e930f91ec 100644
--- a/mysql-test/t/partition.test
+++ b/mysql-test/t/partition.test
@@ -835,13 +835,6 @@ alter table t1 rebuild partition;
drop table t1;
#
-# Bug #14673: Wrong InnoDB default row format
-#
-create table t1 (a int) engine=innodb partition by hash(a) ;
-show table status like 't1';
-drop table t1;
-
-#
# Bug #14526: Partitions: indexed searches fail
#
create table t2 (s1 int not null auto_increment, primary key (s1)) partition by list (s1) (partition p1 values in (1),partition p2 values in (2),partition p3 values in (3),partition p4 values in (4));
@@ -931,6 +924,7 @@ delimiter ;|
CALL test.p1(12);
CALL test.p1(13);
drop table t1;
+drop procedure test.p1;
#
# Bug 13520: Problem with delimiters in COMMENT DATA DIRECTORY ..
@@ -1223,4 +1217,87 @@ alter table t1 drop partition p2;
use test;
drop database db99;
+#
+#BUG 17138 Problem with stored procedure and analyze partition
+#
+--disable_warnings
+drop procedure if exists mysqltest_1;
+--enable_warnings
+
+create table t1 (a int)
+partition by list (a)
+(partition p0 values in (0));
+
+insert into t1 values (0);
+delimiter //;
+
+create procedure mysqltest_1 ()
+begin
+ begin
+ declare continue handler for sqlexception begin end;
+ update ignore t1 set a = 1 where a = 0;
+ end;
+ prepare stmt1 from 'alter table t1';
+ execute stmt1;
+end//
+
+call mysqltest_1()//
+delimiter ;//
+drop table t1;
+drop procedure mysqltest_1;
+
+#
+# Bug 20583 Partitions: Crash using index_last
+#
+create table t1 (a int, index(a))
+partition by hash(a);
+insert into t1 values (1),(2);
+select * from t1 ORDER BY a DESC;
+drop table t1;
+
+#
+# Bug 20770 Partitions: DATA DIRECTORY clause change in reorganize
+# doesn't remove old directory
+#
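+# Create scratch data/index directories and build the DATA/INDEX DIRECTORY clauses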
+--disable_query_log
+--exec mkdir $MYSQLTEST_VARDIR/master-data/tmpdata || true
+eval SET @data_dir = 'DATA DIRECTORY = ''$MYSQLTEST_VARDIR/master-data/tmpdata''';
+let $data_directory = `select @data_dir`;
+
+--exec mkdir $MYSQLTEST_VARDIR/master-data/tmpinx || true
+eval SET @inx_dir = 'INDEX DIRECTORY = ''$MYSQLTEST_VARDIR/master-data/tmpinx''';
+let $inx_directory = `select @inx_dir`;
+--enable_query_log
+
+--replace_result $MYSQLTEST_VARDIR "hello"
+eval create table t1 (a int) engine myisam
+partition by range (a)
+subpartition by hash (a)
+(partition p0 VALUES LESS THAN (1) $data_directory $inx_directory
+ (SUBPARTITION subpart00, SUBPARTITION subpart01));
+
+--replace_result $MYSQLTEST_VARDIR "hello"
+--exec ls $MYSQLTEST_VARDIR/master-data/test/t1* || true
+--replace_result $MYSQLTEST_VARDIR "hello"
+--exec ls $MYSQLTEST_VARDIR/master-data/tmpdata/t1* || true
+--replace_result $MYSQLTEST_VARDIR "hello"
+--exec ls $MYSQLTEST_VARDIR/master-data/tmpinx/t1* || true
+--replace_result $MYSQLTEST_VARDIR "hello"
+
+eval ALTER TABLE t1 REORGANIZE PARTITION p0 INTO
+(partition p1 VALUES LESS THAN (1) $data_directory $inx_directory
+ (SUBPARTITION subpart10, SUBPARTITION subpart11),
+ partition p2 VALUES LESS THAN (2) $data_directory $inx_directory
+ (SUBPARTITION subpart20, SUBPARTITION subpart21));
+
+--replace_result $MYSQLTEST_VARDIR "hello"
+--exec ls $MYSQLTEST_VARDIR/master-data/test/t1* || true
+--replace_result $MYSQLTEST_VARDIR "hello"
+--exec ls $MYSQLTEST_VARDIR/master-data/tmpdata/t1* || true
+--replace_result $MYSQLTEST_VARDIR "hello"
+--exec ls $MYSQLTEST_VARDIR/master-data/tmpinx/t1* || true
+
+drop table t1;
+--exec rmdir $MYSQLTEST_VARDIR/master-data/tmpdata || true
+--exec rmdir $MYSQLTEST_VARDIR/master-data/tmpinx || true
--echo End of 5.1 tests
diff --git a/mysql-test/t/partition_hash.test b/mysql-test/t/partition_hash.test
index 8494de98371..3304f30fb1a 100644
--- a/mysql-test/t/partition_hash.test
+++ b/mysql-test/t/partition_hash.test
@@ -10,6 +10,36 @@ drop table if exists t1;
--enable_warnings
#
+# More partition pruning tests, especially on interval walking
+#
+create table t1 (a int unsigned)
+partition by hash(a div 2)
+partitions 4;
+insert into t1 values (null),(0),(1),(2),(3),(4),(5),(6),(7);
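+# Run each predicate as a plain select and again with explain partitions to show pruning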
+select * from t1 where a < 0;
+select * from t1 where a is null or (a >= 5 and a <= 7);
+select * from t1 where a is null;
+select * from t1 where a is not null;
+select * from t1 where a >= 1 and a < 3;
+select * from t1 where a >= 3 and a <= 5;
+select * from t1 where a > 2 and a < 4;
+select * from t1 where a > 3 and a <= 6;
+select * from t1 where a > 5;
+select * from t1 where a >= 1 and a <= 5;
+explain partitions select * from t1 where a < 0;
+explain partitions select * from t1 where a is null or (a >= 5 and a <= 7);
+explain partitions select * from t1 where a is null;
+explain partitions select * from t1 where a is not null;
+explain partitions select * from t1 where a >= 1 and a < 3;
+explain partitions select * from t1 where a >= 3 and a <= 5;
+explain partitions select * from t1 where a > 2 and a < 4;
+explain partitions select * from t1 where a > 3 and a <= 6;
+explain partitions select * from t1 where a > 5;
+explain partitions select * from t1 where a >= 1 and a <= 5;
+
+drop table t1;
+
+#
# Partition by hash, basic
#
CREATE TABLE t1 (
diff --git a/mysql-test/t/partition_innodb.test b/mysql-test/t/partition_innodb.test
index 51f5b0fec01..a110fb30a3b 100644
--- a/mysql-test/t/partition_innodb.test
+++ b/mysql-test/t/partition_innodb.test
@@ -1,80 +1,10 @@
--- source include/have_innodb.inc
--- source include/have_partition.inc
-SET @max_row = 20;
-let $engine= 'InnoDB';
-let $MAX_VALUE= (2147483646);
-
-let $max_row= `SELECT @max_row`;
-
-# Column list with definition for all tables to be checked
-let $column_list= f_int1 INTEGER,
-f_int2 INTEGER,
-f_char1 CHAR(20),
-f_char2 CHAR(20),
-f_charbig VARCHAR(1000);
-
-let $sub_part_no= 3;
---disable_warnings
-DROP TABLE IF EXISTS t0_template;
---enable_warnings
-eval CREATE TABLE t0_template (
-$column_list ,
-PRIMARY KEY(f_int1))
-ENGINE = MEMORY;
-let $num= `SELECT @max_row`;
-while ($num)
-{
- eval INSERT INTO t0_template
-SET f_int1 = $num, f_int2 = $num, f_char1 = '$num', f_char2 = '$num',
-f_charbig = '===$num===';
- dec $num;
-}
-# 1. Create the table
---disable_warnings
-DROP TABLE IF EXISTS t1;
---enable_warnings
-eval CREATE TABLE t1 (f_date DATE, f_varchar VARCHAR(30)) engine=$engine;
-# 2. Fill the table t1 with records
-INSERT INTO t1 (f_date, f_varchar)
-SELECT CONCAT(CAST((f_int1 + 999) AS CHAR),'-02-10'), CAST(f_char1 AS CHAR)
-FROM t0_template
-WHERE f_int1 + 999 BETWEEN 1000 AND 9999;
-# 3. Calculate the number of inserted records.
-SELECT IF(9999 - 1000 + 1 > @max_row, @max_row , 9999 - 1000 + 1)
- INTO @exp_row_count;
-# DEBUG SELECT @exp_row_count;
-# 4. Print the layout, check Readability
-ALTER TABLE t1 PARTITION BY HASH(CAST(YEAR(f_date) AS SIGNED INTEGER));
---echo # 1.1.5 Add two named partitions + test
-ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part7);
-drop table t1;
-
-CREATE TABLE t1 (f_date DATE, f_varchar VARCHAR(30))
-ENGINE=InnoDB
-PARTITION BY HASH(CAST(YEAR(f_date) AS SIGNED INTEGER));
-
---echo # This statement crashes the server.
---echo # CREATE partitioned table with three partitions in one step
---echo # would be harmless.
-ALTER TABLE t1 ADD PARTITION PARTITIONS 1;
-
---disable_warnings
-DROP VIEW IF EXISTS v1;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t0_aux;
-DROP TABLE IF EXISTS t0_definition;
-DROP TABLE IF EXISTS t0_template;
---enable_warnings
+--source include/have_partition.inc
+--source include/have_innodb.inc
#
-# Bug#20086: Can't get data from key partitioned tables with VARCHAR key
+# Bug #14673: Wrong InnoDB default row format
#
-create table t1 (id varchar(64) primary key) engine=innodb
-partition by key(id) partitions 5;
-insert into t1 values ('a');
-insert into t1 values ('aa');
-insert into t1 values ('aaa');
-select * from t1 where id = 'a';
-select * from t1 where id = 'aa';
-select * from t1 where id = 'aaa';
+create table t1 (a int) engine=innodb partition by hash(a) ;
+show table status like 't1';
drop table t1;
+
diff --git a/mysql-test/t/partition_list.test b/mysql-test/t/partition_list.test
index e243ec468e1..1e420cfe6ed 100644
--- a/mysql-test/t/partition_list.test
+++ b/mysql-test/t/partition_list.test
@@ -10,6 +10,49 @@ drop table if exists t1;
--enable_warnings
#
+# Bug 20733: Zerofill columns gives wrong result with partitioned tables
+#
+create table t1 (a int unsigned)
+partition by list (a)
+(partition p0 values in (0),
+ partition p1 values in (1),
+ partition pnull values in (null),
+ partition p2 values in (2));
+
+insert into t1 values (null),(0),(1),(2);
+select * from t1 where a < 2;
+select * from t1 where a <= 0;
+select * from t1 where a < 1;
+select * from t1 where a > 0;
+select * from t1 where a > 1;
+select * from t1 where a >= 0;
+select * from t1 where a >= 1;
+select * from t1 where a is null;
+select * from t1 where a is not null;
+select * from t1 where a is null or a > 0;
+drop table t1;
+
+create table t1 (a int unsigned, b int)
+partition by list (a)
+subpartition by hash (b)
+subpartitions 2
+(partition p0 values in (0),
+ partition p1 values in (1),
+ partition pnull values in (null, 2),
+ partition p3 values in (3));
+insert into t1 values (0,0),(0,1),(1,0),(1,1),(null,0),(null,1);
+insert into t1 values (2,0),(2,1),(3,0),(3,1);
+
+explain partitions select * from t1 where a is null;
+select * from t1 where a is null;
+explain partitions select * from t1 where a = 2;
+select * from t1 where a = 2;
+select * from t1 where a <= 0;
+select * from t1 where a < 3;
+select * from t1 where a >= 1 or a is null;
+drop table t1;
+
+#
# Test ordinary list partitioning that it works ok
#
CREATE TABLE t1 (
@@ -136,3 +179,4 @@ insert into t1 values (null);
select * from t1;
drop table t1;
+
diff --git a/mysql-test/t/partition_mgm.test b/mysql-test/t/partition_mgm.test
index cfb76192de4..39512de154f 100644
--- a/mysql-test/t/partition_mgm.test
+++ b/mysql-test/t/partition_mgm.test
@@ -12,7 +12,21 @@ ALTER TABLE t1 COALESCE PARTITION 1;
SHOW CREATE TABLE t1;
--replace_result $MYSQLTEST_VARDIR "hello"
--exec ls $MYSQLTEST_VARDIR/master-data/test/t1*
+drop table t1;
+#
+# Bug 20767: REORGANIZE partition crashes
+#
+create table t1 (a int)
+partition by list (a)
+subpartition by hash (a)
+(partition p11 values in (1,2),
+ partition p12 values in (3,4));
+alter table t1 REORGANIZE partition p11, p12 INTO
+(partition p1 values in (1,2,3,4));
+alter table t1 REORGANIZE partition p1 INTO
+(partition p11 values in (1,2),
+ partition p12 values in (3,4));
-
+drop table t1;
diff --git a/mysql-test/t/partition_order.test b/mysql-test/t/partition_order.test
index 1e1b3339d64..ad956361d00 100644
--- a/mysql-test/t/partition_order.test
+++ b/mysql-test/t/partition_order.test
@@ -818,11 +818,27 @@ partitions 2
# Insert a couple of tuples
INSERT into t1 values (1, 1);
INSERT into t1 values (5, NULL);
-INSERT into t1 values (2, 5);
+INSERT into t1 values (2, 4);
+INSERT into t1 values (3, 3);
+INSERT into t1 values (4, 5);
+INSERT into t1 values (7, 1);
+INSERT into t1 values (6, 6);
INSERT into t1 values (30, 4);
INSERT into t1 values (35, 2);
INSERT into t1 values (40, NULL);
select * from t1 force index (b) where b < 10 OR b IS NULL order by b;
+select * from t1 force index (b) where b < 10 ORDER BY b;
+select * from t1 force index (b) where b < 10 ORDER BY b DESC;
+drop table t1;
+create table t1 (a int not null, b int, c varchar(20), key (a,b,c))
+partition by range (b)
+(partition p0 values less than (5),
+ partition p1 values less than (10));
+INSERT into t1 values (1,1,'1'),(2,2,'2'),(1,3,'3'),(2,4,'4'),(1,5,'5');
+INSERT into t1 values (2,6,'6'),(1,7,'7'),(2,8,'8'),(1,9,'9');
+INSERT into t1 values (1, NULL, NULL), (2, NULL, '10');
+select * from t1 where a = 1 order by a desc, b desc;
+select * from t1 where a = 1 order by b desc;
drop table t1;
diff --git a/mysql-test/t/partition_pruning.test b/mysql-test/t/partition_pruning.test
index 5fff2dd49ea..22c15f46af4 100644
--- a/mysql-test/t/partition_pruning.test
+++ b/mysql-test/t/partition_pruning.test
@@ -137,6 +137,32 @@ explain partitions select * from t6 where a >= 3 and a <= 8;
explain partitions select * from t6 where a > 3 and a < 5;
+drop table t6;
+
+create table t6 (a int unsigned not null) partition by LIST(a) (
+ partition p1 values in (1),
+ partition p3 values in (3),
+ partition p5 values in (5),
+ partition p7 values in (7),
+ partition p9 values in (9)
+);
+insert into t6 values (1),(3),(5);
+
+explain partitions select * from t6 where a < 1;
+explain partitions select * from t6 where a <= 1;
+explain partitions select * from t6 where a > 9;
+explain partitions select * from t6 where a >= 9;
+
+explain partitions select * from t6 where a > 0 and a < 5;
+explain partitions select * from t6 where a > 5 and a < 12;
+explain partitions select * from t6 where a > 3 and a < 8 ;
+
+explain partitions select * from t6 where a >= 0 and a <= 5;
+explain partitions select * from t6 where a >= 5 and a <= 12;
+explain partitions select * from t6 where a >= 3 and a <= 8;
+
+explain partitions select * from t6 where a > 3 and a < 5;
+
# RANGE(field) partitioning, interval analysis.
create table t7 (a int not null) partition by RANGE(a) (
partition p10 values less than (10),
@@ -162,6 +188,32 @@ explain partitions select * from t7 where a >= 90;
# misc intervals
explain partitions select * from t7 where a > 11 and a < 29;
+drop table t7;
+
+create table t7 (a int unsigned not null) partition by RANGE(a) (
+ partition p10 values less than (10),
+ partition p30 values less than (30),
+ partition p50 values less than (50),
+ partition p70 values less than (70),
+ partition p90 values less than (90)
+);
+insert into t7 values (10),(30),(50);
+
+# leftmost intervals
+explain partitions select * from t7 where a < 5;
+explain partitions select * from t7 where a < 10;
+explain partitions select * from t7 where a <= 10;
+explain partitions select * from t7 where a = 10;
+
+#rightmost intervals
+explain partitions select * from t7 where a < 90;
+explain partitions select * from t7 where a = 90;
+explain partitions select * from t7 where a > 90;
+explain partitions select * from t7 where a >= 90;
+
+# misc intervals
+explain partitions select * from t7 where a > 11 and a < 29;
+
# LIST(monontonic_func) partitioning
create table t8 (a date not null) partition by RANGE(YEAR(a)) (
partition p0 values less than (1980),
diff --git a/mysql-test/t/partition_range.test b/mysql-test/t/partition_range.test
index 8e1e2e72e69..670b9333ab9 100644
--- a/mysql-test/t/partition_range.test
+++ b/mysql-test/t/partition_range.test
@@ -10,6 +10,50 @@ drop table if exists t1;
--enable_warnings
#
+# More checks for partition pruning
+#
+create table t1 (a int unsigned)
+partition by range (a)
+(partition pnull values less than (0),
+ partition p0 values less than (1),
+ partition p1 values less than(2));
+insert into t1 values (null),(0),(1);
+
+select * from t1 where a is null;
+select * from t1 where a >= 0;
+select * from t1 where a < 0;
+select * from t1 where a <= 0;
+select * from t1 where a > 1;
+explain partitions select * from t1 where a is null;
+explain partitions select * from t1 where a >= 0;
+explain partitions select * from t1 where a < 0;
+explain partitions select * from t1 where a <= 0;
+explain partitions select * from t1 where a > 1;
+drop table t1;
+
+create table t1 (a int unsigned, b int unsigned)
+partition by range (a)
+subpartition by hash (b)
+subpartitions 2
+(partition pnull values less than (0),
+ partition p0 values less than (1),
+ partition p1 values less than(2));
+insert into t1 values (null,0),(null,1),(0,0),(0,1),(1,0),(1,1);
+
+select * from t1 where a is null;
+select * from t1 where a >= 0;
+select * from t1 where a < 0;
+select * from t1 where a <= 0;
+select * from t1 where a > 1;
+explain partitions select * from t1 where a is null;
+explain partitions select * from t1 where a >= 0;
+explain partitions select * from t1 where a < 0;
+explain partitions select * from t1 where a <= 0;
+explain partitions select * from t1 where a > 1;
+
+drop table t1;
+
+#
# Partition by range, basic
#
CREATE TABLE t1 (
@@ -555,3 +599,90 @@ reorganize partition p5 into
drop table t1;
+#
+# New test cases for date based partitioning
+#
+CREATE TABLE t1 (a date)
+PARTITION BY RANGE (TO_DAYS(a))
+(PARTITION p3xx VALUES LESS THAN (TO_DAYS('2004-01-01')),
+ PARTITION p401 VALUES LESS THAN (TO_DAYS('2004-02-01')),
+ PARTITION p402 VALUES LESS THAN (TO_DAYS('2004-03-01')),
+ PARTITION p403 VALUES LESS THAN (TO_DAYS('2004-04-01')),
+ PARTITION p404 VALUES LESS THAN (TO_DAYS('2004-05-01')),
+ PARTITION p405 VALUES LESS THAN (TO_DAYS('2004-06-01')),
+ PARTITION p406 VALUES LESS THAN (TO_DAYS('2004-07-01')),
+ PARTITION p407 VALUES LESS THAN (TO_DAYS('2004-08-01')),
+ PARTITION p408 VALUES LESS THAN (TO_DAYS('2004-09-01')),
+ PARTITION p409 VALUES LESS THAN (TO_DAYS('2004-10-01')),
+ PARTITION p410 VALUES LESS THAN (TO_DAYS('2004-11-01')),
+ PARTITION p411 VALUES LESS THAN (TO_DAYS('2004-12-01')),
+ PARTITION p412 VALUES LESS THAN (TO_DAYS('2005-01-01')),
+ PARTITION p501 VALUES LESS THAN (TO_DAYS('2005-02-01')),
+ PARTITION p502 VALUES LESS THAN (TO_DAYS('2005-03-01')),
+ PARTITION p503 VALUES LESS THAN (TO_DAYS('2005-04-01')),
+ PARTITION p504 VALUES LESS THAN (TO_DAYS('2005-05-01')),
+ PARTITION p505 VALUES LESS THAN (TO_DAYS('2005-06-01')),
+ PARTITION p506 VALUES LESS THAN (TO_DAYS('2005-07-01')),
+ PARTITION p507 VALUES LESS THAN (TO_DAYS('2005-08-01')),
+ PARTITION p508 VALUES LESS THAN (TO_DAYS('2005-09-01')),
+ PARTITION p509 VALUES LESS THAN (TO_DAYS('2005-10-01')),
+ PARTITION p510 VALUES LESS THAN (TO_DAYS('2005-11-01')),
+ PARTITION p511 VALUES LESS THAN (TO_DAYS('2005-12-01')),
+ PARTITION p512 VALUES LESS THAN (TO_DAYS('2006-01-01')),
+ PARTITION p601 VALUES LESS THAN (TO_DAYS('2006-02-01')),
+ PARTITION p602 VALUES LESS THAN (TO_DAYS('2006-03-01')),
+ PARTITION p603 VALUES LESS THAN (TO_DAYS('2006-04-01')),
+ PARTITION p604 VALUES LESS THAN (TO_DAYS('2006-05-01')),
+ PARTITION p605 VALUES LESS THAN (TO_DAYS('2006-06-01')),
+ PARTITION p606 VALUES LESS THAN (TO_DAYS('2006-07-01')),
+ PARTITION p607 VALUES LESS THAN (TO_DAYS('2006-08-01')));
+
+INSERT INTO t1 VALUES ('2003-01-13'),('2003-06-20'),('2003-08-30');
+INSERT INTO t1 VALUES ('2003-04-13'),('2003-07-20'),('2003-10-30');
+INSERT INTO t1 VALUES ('2003-05-13'),('2003-11-20'),('2003-12-30');
+
+INSERT INTO t1 VALUES ('2004-01-13'),('2004-01-20'),('2004-01-30');
+INSERT INTO t1 VALUES ('2004-02-13'),('2004-02-20'),('2004-02-28');
+INSERT INTO t1 VALUES ('2004-03-13'),('2004-03-20'),('2004-03-30');
+INSERT INTO t1 VALUES ('2004-04-13'),('2004-04-20'),('2004-04-30');
+INSERT INTO t1 VALUES ('2004-05-13'),('2004-05-20'),('2004-05-30');
+INSERT INTO t1 VALUES ('2004-06-13'),('2004-06-20'),('2004-06-30');
+INSERT INTO t1 VALUES ('2004-07-13'),('2004-07-20'),('2004-07-30');
+INSERT INTO t1 VALUES ('2004-08-13'),('2004-08-20'),('2004-08-30');
+INSERT INTO t1 VALUES ('2004-09-13'),('2004-09-20'),('2004-09-30');
+INSERT INTO t1 VALUES ('2004-10-13'),('2004-10-20'),('2004-10-30');
+INSERT INTO t1 VALUES ('2004-11-13'),('2004-11-20'),('2004-11-30');
+INSERT INTO t1 VALUES ('2004-12-13'),('2004-12-20'),('2004-12-30');
+
+INSERT INTO t1 VALUES ('2005-01-13'),('2005-01-20'),('2005-01-30');
+INSERT INTO t1 VALUES ('2005-02-13'),('2005-02-20'),('2005-02-28');
+INSERT INTO t1 VALUES ('2005-03-13'),('2005-03-20'),('2005-03-30');
+INSERT INTO t1 VALUES ('2005-04-13'),('2005-04-20'),('2005-04-30');
+INSERT INTO t1 VALUES ('2005-05-13'),('2005-05-20'),('2005-05-30');
+INSERT INTO t1 VALUES ('2005-06-13'),('2005-06-20'),('2005-06-30');
+INSERT INTO t1 VALUES ('2005-07-13'),('2005-07-20'),('2005-07-30');
+INSERT INTO t1 VALUES ('2005-08-13'),('2005-08-20'),('2005-08-30');
+INSERT INTO t1 VALUES ('2005-09-13'),('2005-09-20'),('2005-09-30');
+INSERT INTO t1 VALUES ('2005-10-13'),('2005-10-20'),('2005-10-30');
+INSERT INTO t1 VALUES ('2005-11-13'),('2005-11-20'),('2005-11-30');
+INSERT INTO t1 VALUES ('2005-12-13'),('2005-12-20'),('2005-12-30');
+
+INSERT INTO t1 VALUES ('2006-01-13'),('2006-01-20'),('2006-01-30');
+INSERT INTO t1 VALUES ('2006-02-13'),('2006-02-20'),('2006-02-28');
+INSERT INTO t1 VALUES ('2006-03-13'),('2006-03-20'),('2006-03-30');
+INSERT INTO t1 VALUES ('2006-04-13'),('2006-04-20'),('2006-04-30');
+INSERT INTO t1 VALUES ('2006-05-13'),('2006-05-20'),('2006-05-30');
+INSERT INTO t1 VALUES ('2006-06-13'),('2006-06-20'),('2006-06-30');
+INSERT INTO t1 VALUES ('2006-07-13'),('2006-07-20'),('2006-07-30');
+
+SELECT * FROM t1
+WHERE a >= '2004-07-01' AND a <= '2004-09-30';
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE a >= '2004-07-01' AND a <= '2004-09-30';
+SELECT * from t1
+WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
+ (a >= '2005-07-01' AND a <= '2005-09-30');
+EXPLAIN PARTITIONS SELECT * from t1
+WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
+ (a >= '2005-07-01' AND a <= '2005-09-30');
+DROP TABLE t1;
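TO_DAYS() is monotonically increasing, so a date interval on the column translates directly into an interval over the partitioning function. A rough sketch of what the EXPLAIN calls above are meant to demonstrate (the partition names refer to the table defined in this test; the pruned set is an expectation, not captured output):

    # a single closed date interval is expected to prune to the monthly
    # partitions covering it (roughly p407..p409 for Jul-Sep 2004), and an
    # OR of two intervals to the union of the two partition sets
    EXPLAIN PARTITIONS SELECT * FROM t1
    WHERE (a >= '2004-07-01' AND a <= '2004-09-30')
       OR (a >= '2005-07-01' AND a <= '2005-09-30');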
diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test
index 4a336962293..9165fceb85e 100644
--- a/mysql-test/t/ps.test
+++ b/mysql-test/t/ps.test
@@ -491,6 +491,30 @@ deallocate prepare stmt;
drop table t1, t2;
#
+# Bug#19399 "Stored Procedures 'Lost Connection' when dropping/creating
+# tables"
+# Check that multi-delete tables are also cleaned up before re-execution.
+#
+--disable_warnings
+drop table if exists t1;
+create temporary table if not exists t1 (a1 int);
+--enable_warnings
+# exact delete syntax is essential
+prepare stmt from "delete t1 from t1 where (cast(a1/3 as unsigned) * 3) = a1";
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+# the server crashed on the next statement without the fix
+execute stmt;
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+# the problem was in memory corruption: repeat the test just in case
+execute stmt;
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+execute stmt;
+drop temporary table t1;
+deallocate prepare stmt;
+
# Bug#6102 "Server crash with prepared statement and blank after
# function name"
# ensure that stored functions are cached when preparing a statement
@@ -1146,4 +1170,122 @@ execute stmt;
execute stmt;
deallocate prepare stmt;
drop table t1, t2, t3;
+
+#
+# Bug#17199 "Table not found" error occurs if the query contains a call
+# to a function from another database.
+# Test prepared statements-related behaviour.
+#
+#
+# ALTER TABLE RENAME and Prepared Statements: wrong DB name buffer was used
+# in ALTER ... RENAME which caused memory corruption in prepared statements.
+# No need to fix this problem in 4.1 as ALTER TABLE is not allowed in
+# Prepared Statements in 4.1.
+#
+create database mysqltest_long_database_name_to_thrash_heap;
+use test;
+create table t1 (i int);
+prepare stmt from "alter table test.t1 rename t1";
+use mysqltest_long_database_name_to_thrash_heap;
+execute stmt;
+show tables like 't1';
+prepare stmt from "alter table test.t1 rename t1";
+use test;
+execute stmt;
+show tables like 't1';
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+deallocate prepare stmt;
+#
+# Check that a prepared statement initializes its current database at
+# PREPARE, and then works correctly even if the current database has been
+# changed.
+#
+use mysqltest_long_database_name_to_thrash_heap;
+# Necessary for preparation of INSERT/UPDATE/DELETE to succeed
+prepare stmt_create from "create table t1 (i int)";
+prepare stmt_insert from "insert into t1 (i) values (1)";
+prepare stmt_update from "update t1 set i=2";
+prepare stmt_delete from "delete from t1 where i=2";
+prepare stmt_select from "select * from t1";
+prepare stmt_alter from "alter table t1 add column (b int)";
+prepare stmt_alter1 from "alter table t1 drop column b";
+prepare stmt_analyze from "analyze table t1";
+prepare stmt_optimize from "optimize table t1";
+prepare stmt_show from "show tables like 't1'";
+prepare stmt_truncate from "truncate table t1";
+prepare stmt_drop from "drop table t1";
+# Drop the table that was used to prepare INSERT/UPDATE/DELETE: we will
+# create a new one by executing stmt_create
+drop table t1;
+# Switch the current database
+use test;
+# Check that all prepared statements operate on the database that was
+# active at PREPARE
+execute stmt_create;
+# should return empty set
+show tables like 't1';
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+use test;
+execute stmt_insert;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_update;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_delete;
+execute stmt_select;
+execute stmt_alter;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_alter1;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_analyze;
+execute stmt_optimize;
+execute stmt_show;
+execute stmt_truncate;
+execute stmt_drop;
+show tables like 't1';
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+#
+# Attempt a statement PREPARE when there is no current database:
+# it is expected to return an error.
+#
+drop database mysqltest_long_database_name_to_thrash_heap;
+--error ER_NO_DB_ERROR
+prepare stmt_create from "create table t1 (i int)";
+--error ER_NO_DB_ERROR
+prepare stmt_insert from "insert into t1 (i) values (1)";
+--error ER_NO_DB_ERROR
+prepare stmt_update from "update t1 set i=2";
+--error ER_NO_DB_ERROR
+prepare stmt_delete from "delete from t1 where i=2";
+--error ER_NO_DB_ERROR
+prepare stmt_select from "select * from t1";
+--error ER_NO_DB_ERROR
+prepare stmt_alter from "alter table t1 add column (b int)";
+--error ER_NO_DB_ERROR
+prepare stmt_alter1 from "alter table t1 drop column b";
+--error ER_NO_DB_ERROR
+prepare stmt_analyze from "analyze table t1";
+--error ER_NO_DB_ERROR
+prepare stmt_optimize from "optimize table t1";
+--error ER_NO_DB_ERROR
+prepare stmt_show from "show tables like 't1'";
+--error ER_NO_DB_ERROR
+prepare stmt_truncate from "truncate table t1";
+--error ER_NO_DB_ERROR
+prepare stmt_drop from "drop table t1";
+#
+# The above has automatically deallocated all our statements.
+#
+# Attempt to CREATE a temporary table when no DB is used: it should fail.
+# This proves that no table can be used without explicit specification of
+# its database if there is no current database.
+#
+--error ER_NO_DB_ERROR
+create temporary table t1 (i int);
+#
+# Restore the old environment
+#
+use test;
# End of 5.0 tests
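The long sequence above checks a single rule: an unqualified table name in a prepared statement is resolved against the database that was current at PREPARE time, and later USE statements do not rebind it. A condensed sketch of that rule (database and table names are made up for illustration):

    create database prep_demo;
    use prep_demo;
    create table t (i int);
    prepare s from 'insert into t values (1)';
    use test;
    execute s;                     # still inserts into prep_demo.t
    select * from prep_demo.t;     # expected to return the row (1)
    deallocate prepare s;
    drop database prep_demo;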
diff --git a/mysql-test/t/ps_1general.test b/mysql-test/t/ps_1general.test
index 76da296c0b0..e9b8a1c95b1 100644
--- a/mysql-test/t/ps_1general.test
+++ b/mysql-test/t/ps_1general.test
@@ -324,10 +324,8 @@ execute stmt4;
# history (actions of the bdb engine).
# That is the reason why, we switch the output here off.
# (The real output will be tested in ps_6bdb.test)
---disable_warnings
-prepare stmt4 from ' show engine bdb logs ';
---enable_warnings
--disable_result_log
+prepare stmt4 from ' show engine bdb logs ';
execute stmt4;
--enable_result_log
prepare stmt4 from ' show grants for user ';
diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test
index d1ce1104322..76929cf30e6 100644
--- a/mysql-test/t/range.test
+++ b/mysql-test/t/range.test
@@ -490,6 +490,31 @@ SELECT count(*) FROM t1 WHERE CLIENT='000' AND (ARG1 != ' 1' OR ARG1 != ' 2');
SELECT count(*) FROM t1 WHERE CLIENT='000' AND (ARG1 != ' 2' OR ARG1 != ' 1');
drop table t1;
+# BUG#16168: Wrong range optimizer results, "Use_count: Wrong count ..."
+# warnings in server stderr.
+create table t1 (a int);
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+CREATE TABLE t2 (
+ pk1 int(11) NOT NULL,
+ pk2 int(11) NOT NULL,
+ pk3 int(11) NOT NULL,
+ pk4 int(11) NOT NULL,
+ filler char(82),
+ PRIMARY KEY (pk1,pk2,pk3,pk4)
+) DEFAULT CHARSET=latin1;
+
+insert into t2 select 1, A.a+10*B.a, 432, 44, 'fillerZ' from t1 A, t1 B;
+INSERT INTO t2 VALUES (2621, 2635, 0, 0,'filler'), (2621, 2635, 1, 0,'filler'),
+ (2621, 2635, 10, 0,'filler'), (2621, 2635, 11, 0,'filler'),
+ (2621, 2635, 14, 0,'filler'), (2621, 2635, 1000015, 0,'filler');
+
+SELECT * FROM t2
+WHERE ((((pk4 =0) AND (pk1 =2621) AND (pk2 =2635)))
+OR ((pk4 =1) AND (((pk1 IN ( 7, 2, 1 ))) OR (pk1 =522)) AND ((pk2 IN ( 0, 2635))))
+) AND (pk3 >=1000000);
+drop table t1, t2;
+
# End of 4.1 tests
#
diff --git a/mysql-test/t/rpl_drop_db.test b/mysql-test/t/rpl_drop_db.test
index 38ad07f72f1..ffdc605b402 100644
--- a/mysql-test/t/rpl_drop_db.test
+++ b/mysql-test/t/rpl_drop_db.test
@@ -56,3 +56,7 @@ connection slave;
stop slave;
#system rm -rf $MYSQLTEST_VARDIR/master-data/mysqltest1;
+connection master;
+use test;
+drop table t1;
+
diff --git a/mysql-test/t/rpl_insert.test b/mysql-test/t/rpl_insert.test
new file mode 100644
index 00000000000..9beaff6bab6
--- /dev/null
+++ b/mysql-test/t/rpl_insert.test
@@ -0,0 +1,41 @@
+
+#
+# Bug#20821: INSERT DELAYED fails to write some rows to binlog
+#
+
+--source include/master-slave.inc
+--source include/not_embedded.inc
+--source include/not_windows.inc
+
+--disable_warnings
+CREATE SCHEMA IF NOT EXISTS mysqlslap;
+USE mysqlslap;
+--enable_warnings
+
+CREATE TABLE t1 (id INT, name VARCHAR(64));
+
+let $query = "INSERT DELAYED INTO t1 VALUES (1, 'Dr. No'), (2, 'From Russia With Love'), (3, 'Goldfinger'), (4, 'Thunderball'), (5, 'You Only Live Twice')";
+--exec $MYSQL_SLAP --silent --concurrency=5 --iterations=200 --query=$query --delimiter=";"
+
+# Wait until all 5000 inserts have been inserted into the table
+--disable_query_log
+let $counter= 300; # Max 30 seconds wait
+while (`select count(*)!=5000 from mysqlslap.t1`)
+{
+ sleep 0.1;
+ dec $counter;
+ if (!$counter)
+ {
+ Number of records in t1 didnt reach 5000;
+ }
+}
+--enable_query_log
+
+SELECT COUNT(*) FROM mysqlslap.t1;
+sync_slave_with_master;
+SELECT COUNT(*) FROM mysqlslap.t1;
+
+connection master;
+DROP SCHEMA IF EXISTS mysqlslap;
+sync_slave_with_master;
+
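The while loop in this test is the usual mysqltest polling idiom: sleep in small steps while a condition measured with a backquoted SELECT is still false, and abort once a counter runs out (the bare, unparsable line inside the if block is a deliberate way to make the test fail with a visible message). A generic sketch of the same pattern, assuming the mysqltest die command is available in this tree:

    --disable_query_log
    let $counter= 300;                      # 300 * 0.1s = 30 seconds maximum
    while (`select count(*) < 5000 from mysqlslap.t1`)
    {
      sleep 0.1;
      dec $counter;
      if (!$counter)
      {
        die Timed out waiting for the delayed inserts to be flushed;
      }
    }
    --enable_query_log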
diff --git a/mysql-test/t/rpl_ndb_bank.test b/mysql-test/t/rpl_ndb_bank.test
index 3601c841c54..d6a10e4ccac 100644
--- a/mysql-test/t/rpl_ndb_bank.test
+++ b/mysql-test/t/rpl_ndb_bank.test
@@ -121,6 +121,7 @@ RESET MASTER;
CREATE TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM cluster.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ',';
+--exec rm $MYSQLTEST_VARDIR/tmp.dat || true
--replace_column 1 <the_backup_id>
SELECT @the_backup_id:=backup_id FROM cluster.backup_info;
let the_backup_id=`select @the_backup_id`;
diff --git a/mysql-test/t/rpl_ndb_dd_advance.test b/mysql-test/t/rpl_ndb_dd_advance.test
index 80ff533ec5b..1fe36ecd8a1 100644
--- a/mysql-test/t/rpl_ndb_dd_advance.test
+++ b/mysql-test/t/rpl_ndb_dd_advance.test
@@ -7,6 +7,8 @@
#### Include Section ####
--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
+--source include/ndb_default_cluster.inc
+--source include/not_embedded.inc
#--source include/have_ndb_extra.inc
--source include/master-slave.inc
@@ -439,7 +441,7 @@ CREATE TEMPORARY TABLE IF NOT EXISTS cluster.backup_info (id INT, backup_id INT)
DELETE FROM cluster.backup_info;
LOAD DATA INFILE '../tmp.dat' INTO TABLE cluster.backup_info FIELDS TERMINATED BY ',';
-
+--exec rm $MYSQLTEST_VARDIR/tmp.dat || true
--replace_column 1 <the_backup_id>
SELECT @the_backup_id:=backup_id FROM cluster.backup_info;
diff --git a/mysql-test/t/rpl_ndb_sync.test b/mysql-test/t/rpl_ndb_sync.test
index 95f56609ed7..20d4f5707f8 100644
--- a/mysql-test/t/rpl_ndb_sync.test
+++ b/mysql-test/t/rpl_ndb_sync.test
@@ -1,4 +1,6 @@
--source include/have_ndb.inc
+--source include/ndb_default_cluster.inc
+--source include/not_embedded.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
diff --git a/mysql-test/t/rpl_row_create_table.test b/mysql-test/t/rpl_row_create_table.test
index 8a8ea01d688..3a711e5b496 100644
--- a/mysql-test/t/rpl_row_create_table.test
+++ b/mysql-test/t/rpl_row_create_table.test
@@ -76,7 +76,7 @@ INSERT INTO t7 SELECT a,b FROM tt3;
SELECT * FROM t7 ORDER BY a,b;
# Should be written to the binary log
--replace_regex /table_id: [0-9]+/table_id: #/
-SHOW BINLOG EVENTS FROM 1256;
+SHOW BINLOG EVENTS FROM 1118;
sync_slave_with_master;
SELECT * FROM t7 ORDER BY a,b;
@@ -87,7 +87,7 @@ BEGIN;
INSERT INTO t7 SELECT a,b FROM tt4;
ROLLBACK;
--replace_regex /table_id: [0-9]+/table_id: #/
-SHOW BINLOG EVENTS FROM 1452;
+SHOW BINLOG EVENTS FROM 1314;
SELECT * FROM t7 ORDER BY a,b;
sync_slave_with_master;
SELECT * FROM t7 ORDER BY a,b;
@@ -97,11 +97,12 @@ CREATE TABLE t8 LIKE t4;
CREATE TABLE t9 LIKE tt4;
CREATE TEMPORARY TABLE tt5 LIKE t4;
CREATE TEMPORARY TABLE tt6 LIKE tt4;
+CREATE TEMPORARY TABLE tt7 SELECT 1;
--echo **** On Master ****
--query_vertical SHOW CREATE TABLE t8
--query_vertical SHOW CREATE TABLE t9
--replace_regex /table_id: [0-9]+/table_id: #/
-SHOW BINLOG EVENTS FROM 1548;
+SHOW BINLOG EVENTS FROM 1410;
sync_slave_with_master;
--echo **** On Slave ****
--query_vertical SHOW CREATE TABLE t8
diff --git a/mysql-test/t/rpl_stm_no_op.test b/mysql-test/t/rpl_stm_no_op.test
index d1e0b49abe9..f82bbd8cd55 100644
--- a/mysql-test/t/rpl_stm_no_op.test
+++ b/mysql-test/t/rpl_stm_no_op.test
@@ -89,5 +89,5 @@ select * from t2;
# cleanup
connection master;
-drop table t1;
+drop table t1, t2;
sync_slave_with_master;
diff --git a/mysql-test/t/rpl_switch_stm_row_mixed.test b/mysql-test/t/rpl_switch_stm_row_mixed.test
index 4a79b3995c4..6d282069ba1 100644
--- a/mysql-test/t/rpl_switch_stm_row_mixed.test
+++ b/mysql-test/t/rpl_switch_stm_row_mixed.test
@@ -15,22 +15,22 @@ select @@global.binlog_format, @@session.binlog_format;
CREATE TABLE t1 (a varchar(100));
prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
-set @string="emergency";
-insert into t1 values("work");
+set @string="emergency_1_";
+insert into t1 values("work_2_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values(concat(UUID(),"work"));
+insert into t1 values(concat(UUID(),"work_3_"));
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values(concat("for",UUID()));
-insert into t1 select "yesterday";
+insert into t1 values(concat("for_4_",UUID()));
+insert into t1 select "yesterday_5_";
# verify that temp tables prevent a switch to SBR
-create temporary table tmp(a char(3));
-insert into tmp values("see");
+create temporary table tmp(a char(100));
+insert into tmp values("see_6_");
--error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
set binlog_format=statement;
insert into t1 select * from tmp;
@@ -47,18 +47,18 @@ show session variables like "binlog_format%";
select @@global.binlog_format, @@session.binlog_format;
prepare stmt1 from 'insert into t1 select ?';
-set @string="emergency";
-insert into t1 values("work");
+set @string="emergency_7_";
+insert into t1 values("work_8_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values("work");
+insert into t1 values("work_9_");
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values("for");
-insert into t1 select "yesterday";
+insert into t1 values("for_10_");
+insert into t1 select "yesterday_11_";
# test SET DEFAULT (=statement at this point of test)
set binlog_format=default;
@@ -69,18 +69,18 @@ set global binlog_format=default;
select @@global.binlog_format, @@session.binlog_format;
prepare stmt1 from 'insert into t1 select ?';
-set @string="emergency";
-insert into t1 values("work");
+set @string="emergency_12_";
+insert into t1 values("work_13_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values("work");
+insert into t1 values("work_14_");
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values("for");
-insert into t1 select "yesterday";
+insert into t1 values("for_15_");
+insert into t1 select "yesterday_16_";
# and now the mixed mode
@@ -90,53 +90,52 @@ set global binlog_format=mixed;
select @@global.binlog_format, @@session.binlog_format;
prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
-set @string="emergency";
-insert into t1 values("work");
+set @string="emergency_17_";
+insert into t1 values("work_18_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values(concat(UUID(),"work"));
+insert into t1 values(concat(UUID(),"work_19_"));
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values(concat("for",UUID()));
-insert into t1 select "yesterday";
+insert into t1 values(concat("for_20_",UUID()));
+insert into t1 select "yesterday_21_";
prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values(concat(UUID(),"work"));
+insert into t1 values(concat(UUID(),"work_22_"));
execute stmt1 using @string;
deallocate prepare stmt1;
-insert into t1 values(concat("for",UUID()));
-insert into t1 select "yesterday";
+insert into t1 values(concat("for_23_",UUID()));
+insert into t1 select "yesterday_24_";
# Test of CREATE TABLE SELECT
-create table t2 select UUID();
+create table t2 select rpad(UUID(),100,' ');
create table t3 select 1 union select UUID();
create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3);
create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
# what if UUID() is first:
insert into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
-# inside a stored procedure (inside a function or trigger won't
-# work)
+# inside a stored procedure
delimiter |;
create procedure foo()
begin
-insert into t1 values("work");
-insert into t1 values(concat("for",UUID()));
-insert into t1 select "yesterday";
+insert into t1 values("work_25_");
+insert into t1 values(concat("for_26_",UUID()));
+insert into t1 select "yesterday_27_";
end|
create procedure foo2()
begin
-insert into t1 values(concat("emergency",UUID()));
-insert into t1 values("work");
-insert into t1 values(concat("for",UUID()));
+insert into t1 values(concat("emergency_28_",UUID()));
+insert into t1 values("work_29_");
+insert into t1 values(concat("for_30_",UUID()));
set session binlog_format=row; # accepted for stored procs
-insert into t1 values("more work");
+insert into t1 values("more work_31_");
set session binlog_format=mixed;
end|
create function foo3() returns bigint unsigned
@@ -145,15 +144,130 @@ begin
insert into t1 values("alarm");
return 100;
end|
+create procedure foo4(x varchar(100))
+begin
+insert into t1 values(concat("work_250_",x));
+insert into t1 select "yesterday_270_";
+end|
delimiter ;|
call foo();
call foo2();
+call foo4("hello");
+call foo4(UUID());
+call foo4("world");
# test that can't SET in a stored function
--error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT
select foo3();
select * from t1 where a="alarm";
+# Tests of stored functions/triggers/views for BUG#20930 "Mixed
+# binlogging mode does not work with stored functions, triggers,
+# views"
+
+# Function which calls procedure
+drop function foo3;
+delimiter |;
+create function foo3() returns bigint unsigned
+begin
+ insert into t1 values("foo3_32_");
+ call foo();
+ return 100;
+end|
+delimiter ;|
+insert into t2 select foo3();
+
+prepare stmt1 from 'insert into t2 select foo3()';
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+# Test if stored function calls stored function which calls procedure
+# which requires row-based.
+
+delimiter |;
+create function foo4() returns bigint unsigned
+begin
+ insert into t2 select foo3();
+ return 100;
+end|
+delimiter ;|
+select foo4();
+
+prepare stmt1 from 'select foo4()';
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+# A simple stored function
+delimiter |;
+create function foo5() returns bigint unsigned
+begin
+ insert into t2 select UUID();
+ return 100;
+end|
+delimiter ;|
+select foo5();
+
+prepare stmt1 from 'select foo5()';
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+# A simple stored function where UUID() is in the argument
+delimiter |;
+create function foo6(x varchar(100)) returns bigint unsigned
+begin
+ insert into t2 select x;
+ return 100;
+end|
+delimiter ;|
+select foo6("foo6_1_");
+select foo6(concat("foo6_2_",UUID()));
+
+prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))';
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+
+# Test of views using UUID()
+
+create view v1 as select uuid();
+create table t11 (data varchar(255));
+insert into t11 select * from v1;
+# Test of querying INFORMATION_SCHEMA which parses the view's body,
+# to verify that it is binlogged statement-based (i.e. not polluted by
+# the parsing of the view's body).
+insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11');
+prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')";
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+# Test of triggers with UUID()
+delimiter |;
+create trigger t11_bi before insert on t11 for each row
+begin
+ set NEW.data = concat(NEW.data,UUID());
+end|
+delimiter ;|
+insert into t11 values("try_560_");
+
+# Test that INSERT DELAYED works in mixed mode (BUG#20649)
+insert delayed into t2 values("delay_1_");
+insert delayed into t2 values(concat("delay_2_",UUID()));
+insert delayed into t2 values("delay_6_");
+
+# Test for BUG#20633 (INSERT DELAYED with RAND()/user_variable does not
+# replicate correctly in statement-based mode; we test that it works
+# in mixed mode).
+insert delayed into t2 values(rand());
+set @a=2.345;
+insert delayed into t2 values(@a);
+
+sleep 4; # time for the delayed inserts to reach disk
+
# If you want to do manual testing of the mixed mode regarding UDFs (not
# testable automatically as quite platform- and compiler-dependent),
# you just need to set the variable below to 1, and to
@@ -164,30 +278,181 @@ if ($you_want_to_test_UDF)
{
CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so';
prepare stmt1 from 'insert into t1 select metaphon(?)';
- set @string="emergency";
- insert into t1 values("work");
+ set @string="emergency_133_";
+ insert into t1 values("work_134_");
execute stmt1 using @string;
deallocate prepare stmt1;
prepare stmt1 from 'insert into t1 select ?';
- insert into t1 values(metaphon("work"));
+ insert into t1 values(metaphon("work_135_"));
execute stmt1 using @string;
deallocate prepare stmt1;
- insert into t1 values(metaphon("for"));
- insert into t1 select "yesterday";
- create table t6 select metaphon("for");
- create table t7 select 1 union select metaphon("for");
- create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for") union select 3);
+ insert into t1 values(metaphon("for_136_"));
+ insert into t1 select "yesterday_137_";
+ create table t6 select metaphon("for_138_");
+ create table t7 select 1 union select metaphon("for_139_");
+ create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3);
create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
}
+create table t20 select * from t1; # save for comparing later
+create table t21 select * from t2;
+create table t22 select * from t3;
+drop table t1,t2,t3;
+
+# This tests the fix to
+# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog
+# We verify that under the mixed binlog mode, a stored function
+# modifying at least two tables having an auto_increment column,
+# is binlogged row-based. Indeed in statement-based binlogging,
+# only the auto_increment value generated for the first table
+# is recorded in the binlog, while the value generated for the 2nd
+# table is missing.
+
+create table t1 (a int primary key auto_increment, b varchar(100));
+create table t2 (a int primary key auto_increment, b varchar(100));
+create table t3 (b varchar(100));
+delimiter |;
+create function f (x varchar(100)) returns int deterministic
+begin
+ insert into t1 values(null,x);
+ insert into t2 values(null,x);
+ return 1;
+end|
+delimiter ;|
+select f("try_41_");
+# Two operations which compensate each other except that their net
+# effect is that they advance the auto_increment counter of t2 on slave:
+sync_slave_with_master;
+use mysqltest1;
+insert into t2 values(2,null),(3,null),(4,null);
+delete from t2 where a>=2;
+
+connection master;
+# this is the call which didn't replicate well
+select f("try_42_");
+sync_slave_with_master;
+
+# now use prepared statement and test again, just to see that the RBB
+# mode isn't set at PREPARE but at EXECUTE.
+
+insert into t2 values(3,null),(4,null);
+delete from t2 where a>=3;
+
+connection master;
+prepare stmt1 from 'select f(?)';
+set @string="try_43_";
+insert into t1 values(null,"try_44_"); # should be SBB
+execute stmt1 using @string; # should be RBB
+deallocate prepare stmt1;
+sync_slave_with_master;
+
+# verify that if only one table has auto_inc, it does not trigger RBB
+# (we'll check in binlog further below)
+
+connection master;
+create table t12 select * from t1; # save for comparing later
+drop table t1;
+create table t1 (a int, b varchar(100), key(a));
+select f("try_45_");
+
+# restore table's key
+create table t13 select * from t1;
+drop table t1;
+create table t1 (a int primary key auto_increment, b varchar(100));
+
+# now test if it's two functions, each of them inserts in one table
+
+drop function f;
+# we need a unique key to have sorting of rows by mysqldump
+create table t14 (unique (a)) select * from t2;
+truncate table t2;
+delimiter |;
+create function f1 (x varchar(100)) returns int deterministic
+begin
+ insert into t1 values(null,x);
+ return 1;
+end|
+create function f2 (x varchar(100)) returns int deterministic
+begin
+ insert into t2 values(null,x);
+ return 1;
+end|
+delimiter ;|
+select f1("try_46_"),f2("try_47_");
+
+sync_slave_with_master;
+insert into t2 values(2,null),(3,null),(4,null);
+delete from t2 where a>=2;
+
+connection master;
+# Test with SELECT and INSERT
+select f1("try_48_"),f2("try_49_");
+insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_")));
+sync_slave_with_master;
+
+# verify that if f2 only reads from an auto_inc table, this does not
+# switch to RBB
+connection master;
+drop function f2;
+delimiter |;
+create function f2 (x varchar(100)) returns int deterministic
+begin
+ declare y int;
+ insert into t1 values(null,x);
+ set y = (select count(*) from t2);
+ return y;
+end|
+delimiter ;|
+select f1("try_53_"),f2("try_54_");
+sync_slave_with_master;
+
+# And now, a normal statement with a trigger (no stored functions)
+
+connection master;
+drop function f2;
+delimiter |;
+create trigger t1_bi before insert on t1 for each row
+begin
+ insert into t2 values(null,"try_55_");
+end|
+delimiter ;|
+insert into t1 values(null,"try_56_");
+# and now remove one auto_increment and verify SBB
+alter table t1 modify a int, drop primary key;
+insert into t1 values(null,"try_57_");
+sync_slave_with_master;
+
+# Test for BUG#20499 "mixed mode with temporary table breaks binlog"
+# Slave used to have only 2 rows instead of 3.
+connection master;
+CREATE TEMPORARY TABLE t15 SELECT UUID();
+create table t16 like t15;
+INSERT INTO t16 SELECT * FROM t15;
+# we'll verify that this one is done RBB
+insert into t16 values("try_65_");
+drop table t15;
+# we'll verify that this one is done SBB
+insert into t16 values("try_66_");
+sync_slave_with_master;
+
# and now compare:
+connection master;
+
# first check that data on master is sensible
select count(*) from t1;
select count(*) from t2;
select count(*) from t3;
select count(*) from t4;
select count(*) from t5;
+select count(*) from t11;
+select count(*) from t20;
+select count(*) from t21;
+select count(*) from t22;
+select count(*) from t12;
+select count(*) from t13;
+select count(*) from t14;
+select count(*) from t16;
if ($you_want_to_test_UDF)
{
select count(*) from t6;
@@ -196,21 +461,46 @@ if ($you_want_to_test_UDF)
select count(*) from t9;
}
---replace_column 2 # 5 #
---replace_regex /table_id: [0-9]+/table_id: #/
-show binlog events from 102;
sync_slave_with_master;
# as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql
-connection master;
-drop database mysqltest1;
-sync_slave_with_master;
-
# Let's compare. Note: If they match test will pass, if they do not match
# the test will show that the diff statement failed and not reject file
# will be created. You will need to go to the mysql-test dir and diff
# the files yourself to see what is not matching
--exec diff $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
+
+connection master;
+--replace_column 2 # 5 #
+--replace_regex /table_id: [0-9]+/table_id: #/
+show binlog events from 102;
+
+# Now test that mysqlbinlog works fine on a binlog generated by the
+# mixed mode
+
+# BUG#11312 "DELIMITER is not written to the binary log that causes
+# syntax error" makes that mysqlbinlog will fail if we pass it the
+# text of queries; this forces us to use --base64-output here.
+
+# BUG#20929 "BINLOG command causes invalid free plus assertion
+# failure" makes mysqld segfault when receiving --base64-output
+
+# So I can't enable this piece of test
+# SIGH
+
+if ($enable_when_11312_or_20929_fixed)
+{
+--exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
+drop database mysqltest1;
+--exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
+# the mysqldump output taken from the slave earlier matches what the
+# master contained before it was dropped and restored from the binlog.
+--exec diff $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
+}
+
+drop database mysqltest1;
+sync_slave_with_master;
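Almost every case in this file exercises the same decision: in mixed mode the server picks the logging format per statement, staying statement-based for deterministic statements and switching to row format when something unsafe is involved (UUID(), a stored function writing to more than one auto_increment table, INSERT DELAYED, and so on). A minimal sketch of that behaviour, with names chosen for illustration; the event types in the comments are expectations, not captured output:

    set session binlog_format = mixed;
    create table fmt_demo (a varchar(64));
    # deterministic: expected to be logged as an ordinary Query event
    insert into fmt_demo values ('fixed value');
    # non-deterministic: expected to be logged as Table_map/Write_rows events
    insert into fmt_demo values (uuid());
    drop table fmt_demo;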
diff --git a/mysql-test/t/rpl_temporary.test b/mysql-test/t/rpl_temporary.test
index 0d91a9f8e91..fc336db1a3a 100644
--- a/mysql-test/t/rpl_temporary.test
+++ b/mysql-test/t/rpl_temporary.test
@@ -142,11 +142,8 @@ create temporary table t4 (f int);
create table t5 (f int);
sync_with_master;
# find dumper's $id
-source include/get_binlog_dump_thread_id.inc;
-insert into t4 values (1);
-# a hint how to do that in 5.1
---replace_result $id "`select id from information_schema.processlist where command='Binlog Dump'`"
-eval kill $id; # to stimulate reconnection by slave w/o timeout
+select id from information_schema.processlist where command='Binlog Dump' into @id;
+kill @id; # to stimulate reconnection by slave w/o timeout
insert into t5 select * from t4;
save_master_pos;
@@ -170,7 +167,7 @@ create temporary table t101 (id int);
create temporary table t102 (id int);
set @@session.pseudo_thread_id=200;
create temporary table t201 (id int);
-#create temporary table `t``201` (id int);
+create temporary table `t``201` (id int);
# emulate internal temp table not to come to binlog
create temporary table `#sql_not_user_table202` (id int);
set @@session.pseudo_thread_id=300;
@@ -203,4 +200,4 @@ select * from t1;
connection master;
drop table t1;
-# End of 5.0 tests
+# End of 5.1 tests
diff --git a/mysql-test/t/rpl_variables.test b/mysql-test/t/rpl_variables.test
index 57ae2b9c3c4..b1744c57c9b 100644
--- a/mysql-test/t/rpl_variables.test
+++ b/mysql-test/t/rpl_variables.test
@@ -12,3 +12,6 @@ show variables like 'slave_load_tmpdir';
# We just set some arbitrary values in variables-master.opt so we can test
# that a list of values works correctly
show variables like 'slave_skip_errors';
+
+# Cleanup
+set global slave_net_timeout=default;
diff --git a/mysql-test/t/select.test b/mysql-test/t/select.test
index b75d0dd8bb6..27c5a327ac8 100644
--- a/mysql-test/t/select.test
+++ b/mysql-test/t/select.test
@@ -2286,6 +2286,52 @@ EXPLAIN SELECT * FROM t1 FORCE INDEX (a);
DROP TABLE t1;
#
+# Bug #21019: First result of SELECT COUNT(*) different than consecutive runs
+#
+CREATE TABLE t1 (a int, b int);
+INSERT INTO t1 VALUES (1,1), (2,1), (4,10);
+
+CREATE TABLE t2 (a int PRIMARY KEY, b int, KEY b (b));
+INSERT INTO t2 VALUES (1,NULL), (2,10);
+ALTER TABLE t1 ENABLE KEYS;
+
+EXPLAIN SELECT STRAIGHT_JOIN SQL_NO_CACHE COUNT(*) FROM t2, t1 WHERE t1.b = t2.b OR t2.b IS NULL;
+SELECT STRAIGHT_JOIN SQL_NO_CACHE * FROM t2, t1 WHERE t1.b = t2.b OR t2.b IS NULL;
+EXPLAIN SELECT STRAIGHT_JOIN SQL_NO_CACHE COUNT(*) FROM t2, t1 WHERE t1.b = t2.b OR t2.b IS NULL;
+SELECT STRAIGHT_JOIN SQL_NO_CACHE * FROM t2, t1 WHERE t1.b = t2.b OR t2.b IS NULL;
+DROP TABLE IF EXISTS t1,t2;
+
+#
+# Bug #20954 "avg(keyval) returns 0.38 but max(keyval) returns an empty set"
+#
+--disable_ps_protocol
+CREATE TABLE t1 (key1 float default NULL, UNIQUE KEY key1 (key1));
+CREATE TABLE t2 (key2 float default NULL, UNIQUE KEY key2 (key2));
+INSERT INTO t1 VALUES (0.3762),(0.3845),(0.6158),(0.7941);
+INSERT INTO t2 VALUES (1.3762),(1.3845),(1.6158),(1.7941);
+
+explain select max(key1) from t1 where key1 <= 0.6158;
+explain select max(key2) from t2 where key2 <= 1.6158;
+explain select min(key1) from t1 where key1 >= 0.3762;
+explain select min(key2) from t2 where key2 >= 1.3762;
+explain select max(key1), min(key2) from t1, t2
+where key1 <= 0.6158 and key2 >= 1.3762;
+explain select max(key1) from t1 where key1 <= 0.6158 and rand() + 0.5 >= 0.5;
+explain select min(key1) from t1 where key1 >= 0.3762 and rand() + 0.5 >= 0.5;
+
+select max(key1) from t1 where key1 <= 0.6158;
+select max(key2) from t2 where key2 <= 1.6158;
+select min(key1) from t1 where key1 >= 0.3762;
+select min(key2) from t2 where key2 >= 1.3762;
+select max(key1), min(key2) from t1, t2
+where key1 <= 0.6158 and key2 >= 1.3762;
+select max(key1) from t1 where key1 <= 0.6158 and rand() + 0.5 >= 0.5;
+select min(key1) from t1 where key1 >= 0.3762 and rand() + 0.5 >= 0.5;
+
+DROP TABLE t1,t2;
+--enable_ps_protocol
+
+#
# Bug #18759 "Incorrect string to numeric conversion"
#
# This test is here so that the behavior will not be changed to 4.1
@@ -2901,3 +2947,14 @@ from t1 left outer join t2 on t1.a = t2.c and t2.b <> 1
where t1.b <> 1 order by t1.a;
drop table t1,t2;
+
+#
+# Bug #20569: Garbage in DECIMAL results from some mathematical functions
+#
+SELECT 0.9888889889 * 1.011111411911;
+
+#
+# Bug #10977: No warning issued if a column name is truncated
+#
+prepare stmt from 'select 1 as " a "';
+execute stmt;
diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test
index 94894ef50de..00520df350c 100644
--- a/mysql-test/t/show_check.test
+++ b/mysql-test/t/show_check.test
@@ -395,11 +395,10 @@ show create table t1;
drop table if exists t1;
system rm -f $MYSQLTEST_VARDIR/master-data/test/t1.frm ;
-
-# End of 4.1 tests
#
# BUG 12183 - SHOW OPEN TABLES behavior doesn't match grammar
# First we close all open tables with FLUSH tables and then we open some.
+#
CREATE TABLE txt1(a int);
CREATE TABLE tyt2(a int);
CREATE TABLE urkunde(a int);
@@ -421,7 +420,78 @@ DROP TABLE urkunde;
--error 1049
SHOW TABLES FROM non_existing_database;
-# End of 4.1 tests
+--echo End of 4.1 tests
+
+#
+# Bug#17203: "sql_no_cache sql_cache" in views created from prepared
+# statement
+#
+# The problem was that the initial user setting was forgotten, and the
+# current runtime-determined values of the flags were shown instead.
+#
+--disable_warnings
+DROP VIEW IF EXISTS v1;
+DROP PROCEDURE IF EXISTS p1;
+--enable_warnings
+
+# Check that SHOW CREATE VIEW shows the SQL_CACHE flag exactly as
+# specified by the user.
+CREATE VIEW v1 AS SELECT 1;
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_CACHE 1;
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE 1;
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+# Usage of NOW() disables caching, but we still have to show what the
+# user has specified.
+CREATE VIEW v1 AS SELECT NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+# Check that SQL_NO_CACHE always wins.
+CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW();
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+# Check CREATE VIEW in a prepared statement in a procedure.
+delimiter |;
+CREATE PROCEDURE p1()
+BEGIN
+ SET @s= 'CREATE VIEW v1 AS SELECT SQL_CACHE 1';
+ PREPARE stmt FROM @s;
+ EXECUTE stmt;
+ DROP PREPARE stmt;
+END |
+delimiter ;|
+CALL p1();
+SHOW CREATE VIEW v1;
+
+DROP PROCEDURE p1;
+DROP VIEW v1;
+
+--echo End of 5.0 tests.
--disable_result_log
SHOW AUTHORS;
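The Bug#17203 cases all reduce to one property: the cache hint stored in the view definition is the one the user wrote, even when the optimizer later decides the result cannot be cached (for example because the body uses NOW()). A minimal sketch, with a made-up view name:

    CREATE VIEW v_cache_demo AS SELECT SQL_NO_CACHE NOW();
    # the stored definition is expected to still contain SQL_NO_CACHE,
    # regardless of what the query cache would have decided at runtime
    SHOW CREATE VIEW v_cache_demo;
    DROP VIEW v_cache_demo;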
diff --git a/mysql-test/t/sp-error.test b/mysql-test/t/sp-error.test
index 24b0d4674c5..d370cb3037c 100644
--- a/mysql-test/t/sp-error.test
+++ b/mysql-test/t/sp-error.test
@@ -1075,6 +1075,10 @@ execute stmt;
drop function bug11834_1;
# Attempt to execute statement should return proper error and
# should not crash server.
+
+# NOTE! The error we get from the query below indicates that the SP bug11834_2
+# does not exist (this is wrong but can be accepted).
+# This behaviour has been reported as bug#21294
--error ER_SP_DOES_NOT_EXIST
execute stmt;
deallocate prepare stmt;
diff --git a/mysql-test/t/sp-security.test b/mysql-test/t/sp-security.test
index a8c3c0a22eb..591e9a3ed70 100644
--- a/mysql-test/t/sp-security.test
+++ b/mysql-test/t/sp-security.test
@@ -527,29 +527,6 @@ drop database db_bug14533;
#
-# BUG#7787: Stored procedures: improper warning for "grant execute" statement
-#
-
-# Prepare.
-
-CREATE DATABASE db_bug7787;
-use db_bug7787;
-
-# Test.
-
-CREATE PROCEDURE p1()
- SHOW INNODB STATUS;
-
-GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost;
-
-# Cleanup.
-
-DROP DATABASE db_bug7787;
-drop user user_bug7787@localhost;
-use test;
-
-
-#
# WL#2897: Complete definer support in the stored routines.
#
# The following cases are tested:
@@ -744,4 +721,50 @@ DROP USER mysqltest_2@localhost;
DROP DATABASE mysqltest;
+#
+# Bug#19857 - When a user with CREATE ROUTINE priv creates a routine,
+# it results in NULL p/w
+#
+
+# Can't test with embedded server that doesn't support grants
+
+GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow';
+GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO
+user19857@localhost;
+SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+
+--connect (mysqltest_2_con,localhost,user19857,meow,test)
+--echo
+--echo ---> connection: mysqltest_2_con
+--connection mysqltest_2_con
+
+use test;
+
+DELIMITER //;
+ CREATE PROCEDURE sp19857() DETERMINISTIC
+ BEGIN
+ DECLARE a INT;
+ SET a=1;
+ SELECT a;
+ END //
+DELIMITER ;//
+
+SHOW CREATE PROCEDURE test.sp19857;
+
+--disconnect mysqltest_2_con
+--connect (mysqltest_2_con,localhost,user19857,meow,test)
+--connection mysqltest_2_con
+
+DROP PROCEDURE IF EXISTS test.sp19857;
+
+--echo
+--echo ---> connection: root
+--connection con1root
+
+--disconnect mysqltest_2_con
+
+SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+
+DROP USER user19857@localhost;
+
# End of 5.0 bugs.
diff --git a/mysql-test/t/sp-vars.test b/mysql-test/t/sp-vars.test
index 81504904797..48dbd4de7aa 100644
--- a/mysql-test/t/sp-vars.test
+++ b/mysql-test/t/sp-vars.test
@@ -1271,3 +1271,39 @@ SELECT f1();
#
DROP FUNCTION f1;
+
+
+#
+# Bug#17226: Variable set in cursor on first iteration is assigned
+# second iteration's value
+#
+# The problem was in incorrect handling of local variables of type
+# TEXT (BLOB).
+#
+--disable_warnings
+DROP PROCEDURE IF EXISTS p1;
+--enable_warnings
+
+delimiter |;
+CREATE PROCEDURE p1()
+BEGIN
+ DECLARE v_char VARCHAR(255);
+ DECLARE v_text TEXT DEFAULT '';
+
+ SET v_char = 'abc';
+
+ SET v_text = v_char;
+
+ SET v_char = 'def';
+
+ SET v_text = concat(v_text, '|', v_char);
+
+ SELECT v_text;
+END|
+delimiter ;|
+
+CALL p1();
+
+DROP PROCEDURE p1;
+
+# End of 5.0 tests.
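The result the CALL above should produce follows directly from the procedure body: v_text first receives the value of v_char ('abc'), then '|' and the new value of v_char are appended. A small sketch deriving the expected (unverified) output:

    # the value is fully determined by the assignments in the body:
    #   v_text = 'abc'                       (copy of v_char before it changes)
    #   v_text = concat('abc', '|', 'def')   = 'abc|def'
    SELECT concat('abc', '|', 'def') AS v_text;   # expected: abc|def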
diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test
index cfe9ad12277..3052a8c9161 100644
--- a/mysql-test/t/sp.test
+++ b/mysql-test/t/sp.test
@@ -5636,23 +5636,6 @@ drop table t3|
drop procedure bug16887|
#
-# Bug#13575 SP funcs in select with distinct/group and order by can
-# produce bad data
-#
-create table t3 (f1 int, f2 varchar(3), primary key(f1)) engine=innodb|
-insert into t3 values (1,'aaa'),(2,'bbb'),(3,'ccc')|
-CREATE FUNCTION bug13575 ( p1 integer )
-returns varchar(3)
-BEGIN
-DECLARE v1 VARCHAR(10) DEFAULT null;
-SELECT f2 INTO v1 FROM t3 WHERE f1 = p1;
-RETURN v1;
-END|
-select distinct f1, bug13575(f1) from t3 order by f1|
-drop function bug13575;
-drop table t3|
-
-#
# BUG#16474: SP crashed MySQL
# (when using "order by localvar", where 'localvar' is just that.
#
@@ -5894,6 +5877,52 @@ DROP PROCEDURE bug18037_p1|
DROP PROCEDURE bug18037_p2|
#
+# Bug#17199: "Table not found" error occurs if the query contains a call
+# to a function from another database.
+# See also ps.test for an additional test case for this bug.
+#
+use test|
+create table t3 (i int)|
+insert into t3 values (1), (2)|
+create database mysqltest1|
+use mysqltest1|
+create function bug17199() returns varchar(2) deterministic return 'ok'|
+use test|
+select *, mysqltest1.bug17199() from t3|
+#
+# Bug#18444: Fully qualified stored function names don't work correctly
+# in select statements
+#
+use mysqltest1|
+create function bug18444(i int) returns int no sql deterministic return i + 1|
+use test|
+select mysqltest1.bug18444(i) from t3|
+drop database mysqltest1|
+#
+# Check that the current database has no influence on a stored procedure
+#
+create database mysqltest1 charset=utf8|
+create database mysqltest2 charset=utf8|
+create procedure mysqltest1.p1()
+begin
+-- alters the default collation of database test
+ alter database character set koi8r;
+end|
+use mysqltest1|
+call p1()|
+show create database mysqltest1|
+show create database mysqltest2|
+alter database mysqltest1 character set utf8|
+use mysqltest2|
+call mysqltest1.p1()|
+show create database mysqltest1|
+show create database mysqltest2|
+drop database mysqltest1|
+drop database mysqltest2|
+#
+# Restore the old environment
+use test|
+#
# Bug#15217 "Using a SP cursor on a table created with PREPARE fails with
# weird error". Check that the code that is supposed to work at
# the first execution of a stored procedure actually works for
@@ -5922,6 +5951,24 @@ drop table t3|
drop procedure bug15217|
#
+# BUG#19862: Sort with filesort by function evaluates function twice
+#
+--disable_warnings
+drop procedure if exists bug19862|
+--enable_warnings
+CREATE TABLE t11 (a INT)|
+CREATE TABLE t12 (a INT)|
+CREATE FUNCTION bug19862(x INT) RETURNS INT
+ BEGIN
+ INSERT INTO t11 VALUES (x);
+ RETURN x+1;
+ END|
+INSERT INTO t12 VALUES (1), (2)|
+SELECT bug19862(a) FROM t12 ORDER BY 1|
+SELECT * FROM t11|
+DROP TABLE t11, t12|
+DROP FUNCTION bug19862|
+#
# BUG#NNNN: New bug synopsis
#
#--disable_warnings
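The point of the BUG#19862 case is easy to restate: bug19862(x) inserts x into t11 and returns x+1, so with two rows in t12 the ORDER BY filesort must not evaluate the function a second time. A short sketch of the expected outcome (values derived from the function body, not from captured output; the trailing | matches this file's delimiter):

    SELECT bug19862(a) FROM t12 ORDER BY 1|   # expected result: 2 and 3
    SELECT * FROM t11|                        # expected: exactly two rows, 1 and 2;
                                              # a duplicated evaluation would leave four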
diff --git a/mysql-test/t/sp_notembedded.test b/mysql-test/t/sp_notembedded.test
index 0adbeb2d98b..28abf448089 100644
--- a/mysql-test/t/sp_notembedded.test
+++ b/mysql-test/t/sp_notembedded.test
@@ -46,6 +46,8 @@ call bug4902_2()|
drop procedure bug4902_2|
+# Disable until bug#17244 is fixed
+--disable_parsing
#
# BUG#5278: Stored procedure packets out of order if SET PASSWORD.
#
@@ -63,7 +65,7 @@ select bug5278()|
--error 1133
select bug5278()|
drop function bug5278|
-
+--enable_parsing
--disable_warnings
drop table if exists t1|
@@ -265,3 +267,23 @@ drop view v1|
drop table t3|
delimiter ;|
+
+#
+# Bug#15298 SHOW GRANTS FOR CURRENT_USER: Incorrect output in DEFINER context
+#
+--disable_warnings
+drop procedure if exists bug15298_1;
+drop procedure if exists bug15298_2;
+--enable_warnings
+grant all privileges on test.* to 'mysqltest_1'@'localhost';
+create procedure 15298_1 () sql security definer show grants for current_user;
+create procedure 15298_2 () sql security definer show grants;
+
+connect (con1,localhost,mysqltest_1,,test);
+call 15298_1();
+call 15298_2();
+
+connection default;
+drop user mysqltest_1@localhost;
+drop procedure 15298_1;
+drop procedure 15298_2;
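Bug#15298 is about which account CURRENT_USER refers to inside a SQL SECURITY DEFINER routine: it should be the definer, so both procedures above are expected to print the definer's grants even when called by mysqltest_1. A small sketch of the same idea (user, connection, and procedure names are illustrative):

    grant all privileges on test.* to 'demo_caller'@'localhost';
    create procedure definer_grants() sql security definer show grants for current_user;
    connect (demo_con,localhost,demo_caller,,test);
    # expected to list the definer's grants (e.g. root@localhost), not
    # demo_caller's, because the routine runs in definer security context
    call definer_grants();
    connection default;
    disconnect demo_con;
    drop procedure definer_grants;
    drop user demo_caller@localhost;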
diff --git a/mysql-test/t/sp_trans.test b/mysql-test/t/sp_trans.test
index 1ea32316f1e..325e11b1ec9 100644
--- a/mysql-test/t/sp_trans.test
+++ b/mysql-test/t/sp_trans.test
@@ -552,6 +552,45 @@ drop table t3, t4|
drop procedure bug14210|
set @@session.max_heap_table_size=default|
+#
+# BUG#7787: Stored procedures: improper warning for "grant execute" statement
+#
+
+# Prepare.
+
+CREATE DATABASE db_bug7787|
+use db_bug7787|
+
+# Test.
+
+CREATE PROCEDURE p1()
+ SHOW INNODB STATUS; |
+
+GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost|
+
+# Cleanup.
+
+DROP DATABASE db_bug7787|
+drop user user_bug7787@localhost|
+use test|
+
+#
+# Bug#13575 SP funcs in select with distinct/group and order by can
+# produce bad data
+#
+create table t3 (f1 int, f2 varchar(3), primary key(f1)) engine=innodb|
+insert into t3 values (1,'aaa'),(2,'bbb'),(3,'ccc')|
+CREATE FUNCTION bug13575 ( p1 integer )
+returns varchar(3)
+BEGIN
+DECLARE v1 VARCHAR(10) DEFAULT null;
+SELECT f2 INTO v1 FROM t3 WHERE f1 = p1;
+RETURN v1;
+END|
+select distinct f1, bug13575(f1) from t3 order by f1|
+drop function bug13575|
+drop table t3|
+
#
# BUG#NNNN: New bug synopsis
diff --git a/mysql-test/t/strict.test b/mysql-test/t/strict.test
index 212150e057c..f6a8824c841 100644
--- a/mysql-test/t/strict.test
+++ b/mysql-test/t/strict.test
@@ -1155,3 +1155,42 @@ create table t2 select date from t1;
show create table t2;
drop table t2,t1;
set @@sql_mode= @org_mode;
+
+#
+# Bug #13934 Silent truncation of table comments
+#
+set @@sql_mode='traditional';
+--error 1105
+create table t1 (i int)
+comment '123456789*123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*123456789*';
+--error 1105
+create table t1 (
+i int comment
+'123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*');
+set @@sql_mode= @org_mode;
+create table t1
+(i int comment
+ '123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*
+ 123456789*123456789*123456789*123456789*');
+
+select column_name, column_comment from information_schema.columns where
+table_schema = 'test' and table_name = 't1';
+drop table t1;
+
+set names utf8;
+create table t1 (i int)
+comment '123456789*123456789*123456789*123456789*123456789*123456789*';
+show create table t1;
+drop table t1;
diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test
index 8916a5cec6d..c9ed62f0e54 100644
--- a/mysql-test/t/subselect.test
+++ b/mysql-test/t/subselect.test
@@ -1821,6 +1821,54 @@ SELECT * FROM t1
DROP TABLE t1,t2,t3;
+#
+# BUG #10308: purge log with subselect
+#
+
+purge master logs before (select adddate(current_timestamp(), interval -4 day));
+
+
+#
+# Bug#18503: Queries with a quantified subquery returning empty set may
+# return a wrong result.
+#
+CREATE TABLE t1 (f1 INT);
+CREATE TABLE t2 (f2 INT);
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 WHERE f1 > ALL (SELECT f2 FROM t2);
+SELECT * FROM t1 WHERE f1 > ALL (SELECT f2 FROM t2 WHERE 1=0);
+INSERT INTO t2 VALUES (1);
+INSERT INTO t2 VALUES (2);
+SELECT * FROM t1 WHERE f1 > ALL (SELECT f2 FROM t2 WHERE f2=0);
+DROP TABLE t1, t2;
+
+#
+# Bug#16302: Quantified subquery without any tables gives wrong results
+#
+select 1 from dual where 1 < any (select 2);
+select 1 from dual where 1 < all (select 2);
+select 1 from dual where 2 > any (select 1);
+select 1 from dual where 2 > all (select 1);
+select 1 from dual where 1 < any (select 2 from dual);
+select 1 from dual where 1 < all (select 2 from dual where 1!=1);
+
+# BUG#20975 Wrong query results for subqueries within NOT
+create table t1 (s1 char);
+insert into t1 values (1),(2);
+
+select * from t1 where (s1 < any (select s1 from t1));
+select * from t1 where not (s1 < any (select s1 from t1));
+
+select * from t1 where (s1 < ALL (select s1+1 from t1));
+select * from t1 where not(s1 < ALL (select s1+1 from t1));
+
+select * from t1 where (s1+1 = ANY (select s1 from t1));
+select * from t1 where NOT(s1+1 = ANY (select s1 from t1));
+
+select * from t1 where (s1 = ALL (select s1/s1 from t1));
+select * from t1 where NOT(s1 = ALL (select s1/s1 from t1));
+drop table t1;
+# End of 4.1 tests
# End of 4.1 tests
#
@@ -2131,3 +2179,81 @@ SELECT t1.i FROM t1 WHERE t1.i = CAST((SELECT MAX(i) FROM t2) AS UNSIGNED);
DROP TABLE t1;
DROP TABLE t2;
+
+#
+# Bug#20519: subselect with LIMIT M, N
+#
+
+CREATE TABLE t1 (
+ id bigint(20) unsigned NOT NULL auto_increment,
+ name varchar(255) NOT NULL,
+ PRIMARY KEY (id)
+);
+INSERT INTO t1 VALUES
+ (1, 'Balazs'), (2, 'Joe'), (3, 'Frank');
+
+CREATE TABLE t2 (
+ id bigint(20) unsigned NOT NULL auto_increment,
+ mid bigint(20) unsigned NOT NULL,
+ date date NOT NULL,
+ PRIMARY KEY (id)
+);
+INSERT INTO t2 VALUES
+ (1, 1, '2006-03-30'), (2, 2, '2006-04-06'), (3, 3, '2006-04-13'),
+ (4, 2, '2006-04-20'), (5, 1, '2006-05-01');
+
+SELECT *,
+ (SELECT date FROM t2 WHERE mid = t1.id
+ ORDER BY date DESC LIMIT 0, 1) AS date_last,
+ (SELECT date FROM t2 WHERE mid = t1.id
+ ORDER BY date DESC LIMIT 3, 1) AS date_next_to_last
+ FROM t1;
+SELECT *,
+ (SELECT COUNT(*) FROM t2 WHERE mid = t1.id
+ ORDER BY date DESC LIMIT 1, 1) AS date_count
+ FROM t1;
+SELECT *,
+ (SELECT date FROM t2 WHERE mid = t1.id
+ ORDER BY date DESC LIMIT 0, 1) AS date_last,
+ (SELECT date FROM t2 WHERE mid = t1.id
+ ORDER BY date DESC LIMIT 1, 1) AS date_next_to_last
+ FROM t1;
+DROP TABLE t1,t2;
+
+#
+# Bug#20869: subselect with range access by DESC
+#
+
+CREATE TABLE t1 (
+ i1 int(11) NOT NULL default '0',
+ i2 int(11) NOT NULL default '0',
+ t datetime NOT NULL default '0000-00-00 00:00:00',
+ PRIMARY KEY (i1,i2,t)
+);
+INSERT INTO t1 VALUES
+(24,1,'2005-03-03 16:31:31'),(24,1,'2005-05-27 12:40:07'),
+(24,1,'2005-05-27 12:40:08'),(24,1,'2005-05-27 12:40:10'),
+(24,1,'2005-05-27 12:40:25'),(24,1,'2005-05-27 12:40:30'),
+(24,2,'2005-03-03 13:43:05'),(24,2,'2005-03-03 16:23:31'),
+(24,2,'2005-03-03 16:31:30'),(24,2,'2005-05-27 12:37:02'),
+(24,2,'2005-05-27 12:40:06');
+
+CREATE TABLE t2 (
+ i1 int(11) NOT NULL default '0',
+ i2 int(11) NOT NULL default '0',
+ t datetime default NULL,
+ PRIMARY KEY (i1)
+);
+INSERT INTO t2 VALUES (24,1,'2006-06-20 12:29:40');
+
+EXPLAIN
+SELECT * FROM t1,t2
+ WHERE t1.t = (SELECT t1.t FROM t1
+ WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
+ ORDER BY t1.t DESC LIMIT 1);
+SELECT * FROM t1,t2
+ WHERE t1.t = (SELECT t1.t FROM t1
+ WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
+ ORDER BY t1.t DESC LIMIT 1);
+
+DROP TABLE t1, t2;
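The Bug#18503 and Bug#16302 cases rest on the standard semantics of quantified comparisons: a comparison with ALL over an empty set is vacuously true, while one with ANY over an empty set has no witness and is false. A minimal sketch (table names made up for illustration):

    CREATE TABLE qa (f1 INT);
    CREATE TABLE qb (f2 INT);
    INSERT INTO qa VALUES (1);
    # qb is empty: > ALL is vacuously true, > ANY is false
    SELECT * FROM qa WHERE f1 > ALL (SELECT f2 FROM qb);   # expected: one row (1)
    SELECT * FROM qa WHERE f1 > ANY (SELECT f2 FROM qb);   # expected: empty result
    DROP TABLE qa, qb;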
diff --git a/mysql-test/t/subselect2.test b/mysql-test/t/subselect2.test
index b21eda176b6..162bdd0d90a 100644
--- a/mysql-test/t/subselect2.test
+++ b/mysql-test/t/subselect2.test
@@ -150,3 +150,21 @@ EXPLAIN SELECT t2.*, t4.DOCTYPENAME, t1.CONTENTSIZE,t1.MIMETYPE FROM t2 INNER JO
drop table t1, t2, t3, t4;
# End of 4.1 tests
+
+#
+# Bug #20792: Incorrect results from aggregate subquery
+#
+CREATE TABLE t1 (a int(10) , PRIMARY KEY (a)) Engine=InnoDB;
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (a int(10), PRIMARY KEY (a)) Engine=InnoDB;
+INSERT INTO t2 VALUES (1);
+
+CREATE TABLE t3 (a int(10), b int(10), c int(10),
+ PRIMARY KEY (a)) Engine=InnoDB;
+INSERT INTO t3 VALUES (1,2,1);
+
+SELECT t1.* FROM t1 WHERE (SELECT COUNT(*) FROM t3,t2 WHERE t3.c=t2.a
+ and t2.a='1' AND t1.a=t3.b) > 0;
+
+DROP TABLE t1,t2,t3;
diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test
index 58adffc6e87..735a2ad78b8 100644
--- a/mysql-test/t/trigger.test
+++ b/mysql-test/t/trigger.test
@@ -237,7 +237,7 @@ begin
end|
delimiter ;|
insert into t3 values (1);
---error 1048
+--error ER_BAD_NULL_ERROR
insert into t1 values (4, "four", 1), (5, "five", 2);
select * from t1;
select * from t2;
@@ -295,19 +295,19 @@ drop table t1, t2;
create table t1 (i int);
create table t3 (i int);
---error 1363
+--error ER_TRG_NO_SUCH_ROW_IN_TRG
create trigger trg before insert on t1 for each row set @a:= old.i;
---error 1363
+--error ER_TRG_NO_SUCH_ROW_IN_TRG
create trigger trg before delete on t1 for each row set @a:= new.i;
---error 1362
+--error ER_TRG_CANT_CHANGE_ROW
create trigger trg before update on t1 for each row set old.i:=1;
---error 1363
+--error ER_TRG_NO_SUCH_ROW_IN_TRG
create trigger trg before delete on t1 for each row set new.i:=1;
---error 1362
+--error ER_TRG_CANT_CHANGE_ROW
create trigger trg after update on t1 for each row set new.i:=1;
---error 1054
+--error ER_BAD_FIELD_ERROR
create trigger trg before update on t1 for each row set new.j:=1;
---error 1054
+--error ER_BAD_FIELD_ERROR
create trigger trg before update on t1 for each row set @a:=old.j;
@@ -315,25 +315,25 @@ create trigger trg before update on t1 for each row set @a:=old.j;
# Let us test various trigger creation errors
# Also quickly test table namespace (bug#5892/6182)
#
---error 1146
+--error ER_NO_SUCH_TABLE
create trigger trg before insert on t2 for each row set @a:=1;
create trigger trg before insert on t1 for each row set @a:=1;
---error 1359
+--error ER_TRG_ALREADY_EXISTS
create trigger trg after insert on t1 for each row set @a:=1;
---error 1359
+--error ER_NOT_SUPPORTED_YET
create trigger trg2 before insert on t1 for each row set @a:=1;
---error 1359
+--error ER_TRG_ALREADY_EXISTS
create trigger trg before insert on t3 for each row set @a:=1;
create trigger trg2 before insert on t3 for each row set @a:=1;
drop trigger trg2;
drop trigger trg;
---error 1360
+--error ER_TRG_DOES_NOT_EXIST
drop trigger trg;
create view v1 as select * from t1;
---error 1347
+--error ER_WRONG_OBJECT
create trigger trg before insert on v1 for each row set @a:=1;
drop view v1;
@@ -341,7 +341,7 @@ drop table t1;
drop table t3;
create temporary table t1 (i int);
---error 1361
+--error ER_TRG_ON_VIEW_OR_TEMP_TABLE
create trigger trg before insert on t1 for each row set @a:=1;
drop table t1;
@@ -495,47 +495,47 @@ select * from t1;
# their main effect. This is because operation on the table row is
# executed before "after" trigger and its effect cannot be rolled back
# when whole statement fails, because t1 is MyISAM table.
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 values (2, 1);
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1 set k = 2 where i = 2;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
delete from t1 where i = 2;
select * from t1;
# Should fail and insert only 1 row
---error 1054
+--error ER_BAD_FIELD_ERROR
load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (i, k);
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 select 3, 3;
select * from t1;
# Multi-update working on the fly, again it will update only
# one row even if more matches
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1, t2 set k = k + 10 where t1.i = t2.i;
select * from t1;
# The same for multi-update via temp table
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1, t2 set k = k + 10 where t1.i = t2.i and k < 3;
select * from t1;
# Multi-delete on the fly
---error 1054
+--error ER_BAD_FIELD_ERROR
delete t1, t2 from t1 straight_join t2 where t1.i = t2.i;
select * from t1;
# And via temporary storage
---error 1054
+--error ER_BAD_FIELD_ERROR
delete t2, t1 from t2 straight_join t1 where t1.i = t2.i;
select * from t1;
# Prepare table for testing of REPLACE and INSERT ... ON DUPLICATE KEY UPDATE
alter table t1 add primary key (i);
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 values (3, 4) on duplicate key update k= k + 10;
select * from t1;
# The following statement will delete old row and won't
# insert new one since after delete trigger will fail.
---error 1054
+--error ER_BAD_FIELD_ERROR
replace into t1 values (3, 3);
select * from t1;
# Also drops all triggers
@@ -553,33 +553,33 @@ alter table t1 drop column bt;
# The following statements changing t1 should fail and should not
# cause any effect on table, since "before" trigger is executed
# before operation on the table row.
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 values (3, 3);
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1 set i = 2;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
delete from t1;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (i, k);
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 select 3, 3;
select * from t1;
# Both types of multi-update (on the fly and via temp table)
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1, t2 set k = k + 10 where t1.i = t2.i;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
update t1, t2 set k = k + 10 where t1.i = t2.i and k < 2;
select * from t1;
# Both types of multi-delete
---error 1054
+--error ER_BAD_FIELD_ERROR
delete t1, t2 from t1 straight_join t2 where t1.i = t2.i;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
delete t2, t1 from t2 straight_join t1 where t1.i = t2.i;
select * from t1;
# Let us test REPLACE/INSERT ... ON DUPLICATE KEY UPDATE.
@@ -587,10 +587,10 @@ select * from t1;
# in ordinary INSERT we need to drop "before insert" trigger.
alter table t1 add primary key (i);
drop trigger bi;
---error 1054
+--error ER_BAD_FIELD_ERROR
insert into t1 values (2, 4) on duplicate key update k= k + 10;
select * from t1;
---error 1054
+--error ER_BAD_FIELD_ERROR
replace into t1 values (2, 4);
select * from t1;
# Also drops all triggers
@@ -608,7 +608,7 @@ insert into t1 values (1, 2);
create function bug5893 () returns int return 5;
create trigger t1_bu before update on t1 for each row set new.col1= bug5893();
drop function bug5893;
---error 1305
+--error ER_SP_DOES_NOT_EXIST
update t1 set col2 = 4;
# This should not crash server too.
drop trigger t1_bu;
@@ -908,9 +908,9 @@ create trigger t1_bi after insert on t1 for each row insert into t3 values (new.
# Until we implement proper mechanism for invalidation of PS/SP when table
# or SP's are changed these two statements will fail with 'Table ... was
# not locked' error (this mechanism should be based on the new TDC).
---error 1100
+--error 1100 #ER_TABLE_NOT_LOCKED
execute stmt1;
---error 1100
+--error 1100 #ER_TABLE_NOT_LOCKED
call p1();
deallocate prepare stmt1;
drop procedure p1;
@@ -1186,7 +1186,7 @@ INSERT INTO t1 VALUES (@x);
SELECT @x;
SET @x=2;
---error 1365
+--error ER_DIVISION_BY_ZERO
UPDATE t1 SET i1 = @x;
SELECT @x;
@@ -1197,7 +1197,7 @@ INSERT INTO t1 VALUES (@x);
SELECT @x;
SET @x=4;
---error 1365
+--error ER_DIVISION_BY_ZERO
UPDATE t1 SET i1 = @x;
SELECT @x;
@@ -1281,4 +1281,26 @@ SELECT * FROM t1;
DROP TABLE t1;
-# End of 5.0 tests
+#
+# Bug #18005: Creating a trigger on mysql.event leads to server crash on
+# scheduler startup
+#
+# Bug #18361: Triggers on mysql.user table cause server crash
+#
+# We don't allow triggers on the mysql schema
+delimiter |;
+--error ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
+create trigger wont_work after update on mysql.user for each row
+begin
+ set @a:= 1;
+end|
+# Try when we're already using the mysql schema
+use mysql|
+--error ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
+create trigger wont_work after update on event for each row
+begin
+ set @a:= 1;
+end|
+delimiter ;|
+
+--echo End of 5.0 tests
diff --git a/mysql-test/t/type_newdecimal.test b/mysql-test/t/type_newdecimal.test
index 35aff8b3c5a..7afdb857ca2 100644
--- a/mysql-test/t/type_newdecimal.test
+++ b/mysql-test/t/type_newdecimal.test
@@ -1097,6 +1097,17 @@ select * from t1;
drop table t1;
#
+# Bug #18014: problem with 'alter table'
+#
+
+create table t1(a decimal(7,2));
+insert into t1 values(123.12);
+select * from t1;
+alter table t1 modify a decimal(10,2);
+select * from t1;
+drop table t1;
+
+#
# Bug#19667 group by a decimal expression yields wrong result
#
create table t1 (i int, j int);
diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test
index 9b09421dd1f..6873569d0e9 100644
--- a/mysql-test/t/type_timestamp.test
+++ b/mysql-test/t/type_timestamp.test
@@ -6,6 +6,9 @@
drop table if exists t1,t2;
--enable_warnings
+# Set timezone to GMT+3, to make it possible to use "interval 3 hour"
+set time_zone="+03:00";
+
CREATE TABLE t1 (a int, t timestamp);
CREATE TABLE t2 (a int, t datetime);
SET TIMESTAMP=1234;
@@ -322,3 +325,6 @@ select * from t1;
drop table t1;
# End of 4.1 tests
+
+# Restore timezone to default
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/t/udf.test b/mysql-test/t/udf.test
index e2556692612..560ec88eb10 100644
--- a/mysql-test/t/udf.test
+++ b/mysql-test/t/udf.test
@@ -99,6 +99,24 @@ delimiter ;//
call XXX2();
drop procedure xxx2;
+#
+# Bug#19904: UDF: not initialized *is_null per row
+#
+
+CREATE TABLE bug19904(n INT, v varchar(10));
+INSERT INTO bug19904 VALUES (1,'one'),(2,'two'),(NULL,NULL),(3,'three'),(4,'four');
+SELECT myfunc_double(n) AS f FROM bug19904;
+SELECT metaphon(v) AS f FROM bug19904;
+DROP TABLE bug19904;
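The Bug#19904 case above runs UDFs over a table that mixes NULL and non-NULL rows. As a minimal sketch (not part of this patch, and using a hypothetical my_abs() rather than the udf_example functions), a well-behaved UDF row function sets *is_null explicitly on every call instead of reusing whatever was left there by the previous row:

    #include <my_global.h>
    #include <mysql.h>
    #include <string.h>

    my_bool my_abs_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
    {
      if (args->arg_count != 1 || args->arg_type[0] != REAL_RESULT)
      {
        strcpy(message, "my_abs() requires one REAL argument");
        return 1;
      }
      initid->maybe_null= 1;                /* the result may be NULL */
      return 0;
    }

    double my_abs(UDF_INIT *initid __attribute__((unused)), UDF_ARGS *args,
                  char *is_null, char *error __attribute__((unused)))
    {
      double *arg= (double*) args->args[0];
      if (!arg)                             /* NULL input for this row */
      {
        *is_null= 1;
        return 0.0;
      }
      *is_null= 0;                          /* reset for every row */
      return *arg < 0 ? -*arg : *arg;
    }

With per-row handling like this, the SELECTs above return NULL only for the (NULL,NULL) row, which is the behaviour the test is expected to verify.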
+
+#
+# Bug#19862: Sort with filesort by function evaluates function twice
+#
+create table t1(f1 int);
+insert into t1 values(1),(2);
+explain select myfunc_int(f1) from t1 order by 1;
+drop table t1;
+--echo End of 5.0 tests.
#
# Drop the example functions from udf_example
@@ -114,3 +132,4 @@ DROP FUNCTION lookup;
DROP FUNCTION reverse_lookup;
DROP FUNCTION avgcost;
+
diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test
index 7dfe4ac482f..fdb5f968589 100644
--- a/mysql-test/t/union.test
+++ b/mysql-test/t/union.test
@@ -390,8 +390,8 @@ create table t1 SELECT da from t2 UNION select dt from t2;
select * from t1;
show create table t1;
drop table t1;
-create table t1 SELECT dt from t2 UNION select sc from t2;
-select * from t1;
+create table t1 SELECT dt from t2 UNION select trim(sc) from t2;
+select trim(dt) from t1;
show create table t1;
drop table t1;
create table t1 SELECT dt from t2 UNION select sv from t2;
@@ -795,6 +795,14 @@ drop table t1;
# End of 4.1 tests
#
+# Bug#12185: Data type aggregation may produce wrong result
+#
+create table t1(f1 char(1), f2 char(5), f3 binary(1), f4 binary(5), f5 timestamp, f6 varchar(1) character set utf8 collate utf8_general_ci, f7 text);
+create table t2 as select *, f6 as f8 from t1 union select *, f7 from t1;
+show create table t2;
+drop table t1, t2;
+
+#
# Bug#18175: Union select over 129 tables with a sum function fails.
#
(select avg(1)) union (select avg(1)) union (select avg(1)) union
diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test
index 7aa79f0eb40..771a4ad3ed3 100644
--- a/mysql-test/t/variables.test
+++ b/mysql-test/t/variables.test
@@ -302,6 +302,22 @@ set wait_timeout=100;
set log_warnings=1;
#
+# Bugs: #20392: INSERT_ID session variable has weird value
+#
+select @@session.insert_id;
+set @save_insert_id=@@session.insert_id;
+set session insert_id=20;
+select @@session.insert_id;
+
+set session last_insert_id=100;
+select @@session.insert_id;
+select @@session.last_insert_id;
+select @@session.insert_id;
+
+set @@session.insert_id=@save_insert_id;
+select @@session.insert_id;
+
+#
# key buffer
#
diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test
index b432a4c39ca..2099e07fadd 100644
--- a/mysql-test/t/view.test
+++ b/mysql-test/t/view.test
@@ -2600,4 +2600,94 @@ CREATE TABLE t2 SELECT * FROM v1;
SELECT * FROM t2;
DROP VIEW v1;
-DROP TABLE IF EXISTS t1,t2;
+DROP TABLE t1,t2;
+
+#
+# Bug#16110: insert permitted into view col w/o default value
+#
+CREATE TABLE t1 (a INT NOT NULL, b INT NULL DEFAULT NULL);
+CREATE VIEW v1 AS SELECT a, b FROM t1;
+
+INSERT INTO v1 (b) VALUES (2);
+
+SET SQL_MODE = STRICT_ALL_TABLES;
+--error 1423
+INSERT INTO v1 (b) VALUES (4);
+SET SQL_MODE = '';
+
+SELECT * FROM t1;
+
+DROP VIEW v1;
+DROP TABLE t1;
+
+#
+# Bug #18243: expression over a view column with the REVERSE function
+#
+
+CREATE TABLE t1 (firstname text, surname text);
+INSERT INTO t1 VALUES
+ ("Bart","Simpson"),("Milhouse","van Houten"),("Montgomery","Burns");
+
+CREATE VIEW v1 AS SELECT CONCAT(firstname," ",surname) AS name FROM t1;
+SELECT CONCAT(LEFT(name,LENGTH(name)-INSTR(REVERSE(name)," ")),
+ LEFT(name,LENGTH(name)-INSTR(REVERSE(name)," "))) AS f1
+ FROM v1;
+
+DROP VIEW v1;
+DROP TABLE t1;
+
+#
+# Bug #19714: wrong type of a view column specified by an expression over ints
+#
+
+CREATE TABLE t1 (i int, j int);
+CREATE VIEW v1 AS SELECT COALESCE(i,j) FROM t1;
+DESCRIBE v1;
+CREATE TABLE t2 SELECT COALESCE(i,j) FROM t1;
+DESCRIBE t2;
+
+DROP VIEW v1;
+DROP TABLE t1,t2;
+
+#
+# Bug #17526: views with TRIM functions
+#
+
+CREATE TABLE t1 (s varchar(10));
+INSERT INTO t1 VALUES ('yadda'), ('yady');
+
+SELECT TRIM(BOTH 'y' FROM s) FROM t1;
+CREATE VIEW v1 AS SELECT TRIM(BOTH 'y' FROM s) FROM t1;
+SELECT * FROM v1;
+DROP VIEW v1;
+
+SELECT TRIM(LEADING 'y' FROM s) FROM t1;
+CREATE VIEW v1 AS SELECT TRIM(LEADING 'y' FROM s) FROM t1;
+SELECT * FROM v1;
+DROP VIEW v1;
+
+SELECT TRIM(TRAILING 'y' FROM s) FROM t1;
+CREATE VIEW v1 AS SELECT TRIM(TRAILING 'y' FROM s) FROM t1;
+SELECT * FROM v1;
+DROP VIEW v1;
+
+DROP TABLE t1;
+
+#
+# Bug #21086: server crashes when VIEW defined with a SELECT with COLLATE
+# clause is called
+#
+CREATE TABLE t1 (s1 char);
+INSERT INTO t1 VALUES ('Z');
+
+CREATE VIEW v1 AS SELECT s1 collate latin1_german1_ci AS col FROM t1;
+
+CREATE VIEW v2 (col) AS SELECT s1 collate latin1_german1_ci FROM t1;
+
+# either of these statements used to cause a crash
+INSERT INTO v1 (col) VALUES ('b');
+INSERT INTO v2 (col) VALUES ('c');
+
+SELECT s1 FROM t1;
+DROP VIEW v1, v2;
+DROP TABLE t1;
diff --git a/mysql-test/t/view_grant.test b/mysql-test/t/view_grant.test
index 801bd13fab7..daba7dfaa3c 100644
--- a/mysql-test/t/view_grant.test
+++ b/mysql-test/t/view_grant.test
@@ -872,3 +872,65 @@ DROP VIEW test2.t3;
DROP TABLE test2.t1, test1.t0;
DROP DATABASE test2;
DROP DATABASE test1;
+
+
+#
+# BUG#20570: CURRENT_USER() in a VIEW with SQL SECURITY DEFINER
+# returns invoker name
+#
+--disable_warnings
+DROP VIEW IF EXISTS v1;
+DROP VIEW IF EXISTS v2;
+DROP VIEW IF EXISTS v3;
+DROP FUNCTION IF EXISTS f1;
+DROP FUNCTION IF EXISTS f2;
+DROP PROCEDURE IF EXISTS p1;
+--enable_warnings
+
+CREATE SQL SECURITY DEFINER VIEW v1 AS SELECT CURRENT_USER() AS cu;
+
+CREATE FUNCTION f1() RETURNS VARCHAR(77) SQL SECURITY INVOKER
+ RETURN CURRENT_USER();
+CREATE SQL SECURITY DEFINER VIEW v2 AS SELECT f1() AS cu;
+
+CREATE PROCEDURE p1(OUT cu VARCHAR(77)) SQL SECURITY INVOKER
+ SET cu= CURRENT_USER();
+delimiter |;
+CREATE FUNCTION f2() RETURNS VARCHAR(77) SQL SECURITY INVOKER
+BEGIN
+ DECLARE cu VARCHAR(77);
+ CALL p1(cu);
+ RETURN cu;
+END|
+delimiter ;|
+CREATE SQL SECURITY DEFINER VIEW v3 AS SELECT f2() AS cu;
+
+CREATE USER mysqltest_u1@localhost;
+GRANT ALL ON test.* TO mysqltest_u1@localhost;
+
+connect (conn1, localhost, mysqltest_u1,,);
+
+--echo
+--echo The following tests should all return 1.
+--echo
+SELECT CURRENT_USER() = 'mysqltest_u1@localhost';
+SELECT f1() = 'mysqltest_u1@localhost';
+CALL p1(@cu);
+SELECT @cu = 'mysqltest_u1@localhost';
+SELECT f2() = 'mysqltest_u1@localhost';
+SELECT cu = 'root@localhost' FROM v1;
+SELECT cu = 'root@localhost' FROM v2;
+SELECT cu = 'root@localhost' FROM v3;
+
+disconnect conn1;
+connection default;
+
+DROP VIEW v3;
+DROP FUNCTION f2;
+DROP PROCEDURE p1;
+DROP FUNCTION f1;
+DROP VIEW v2;
+DROP VIEW v1;
+DROP USER mysqltest_u1@localhost;
+
+# End of 5.0 tests.
diff --git a/mysql-test/t/wait_timeout.test b/mysql-test/t/wait_timeout.test
index 8387c08c902..dbd792e48d8 100644
--- a/mysql-test/t/wait_timeout.test
+++ b/mysql-test/t/wait_timeout.test
@@ -9,16 +9,20 @@
# Connect with another connection and reset counters
--disable_query_log
connect (wait_con,localhost,root,,test,,);
-flush status; # Reset counters
connection wait_con;
set session wait_timeout=100;
let $retries=300;
-let $aborted_clients = `SHOW STATUS LIKE 'aborted_clients'`;
set @aborted_clients= 0;
--enable_query_log
# Disable reconnect and do the query
connection default;
+# If the host is slow (e.g. under Valgrind), we may have already timed out
+# here. So force a reconnect if necessary, using a dummy query, and issue a
+# 'flush status' to reset the 'aborted_clients' counter.
+--enable_reconnect
+select 0;
+flush status;
--disable_reconnect
select 1;
@@ -38,6 +42,9 @@ while (!`select @aborted_clients`)
}
}
--enable_query_log
+# The server has disconnected; add a small sleep to make sure
+# the disconnect has reached the client
+sleep 1;
connection default;
# When the connection is closed in this way, the error code should
@@ -46,6 +53,9 @@ connection default;
select 2;
--enable_reconnect
select 3;
+# Disconnect so that we will not be confused by a future abort from this
+# connection.
+disconnect default;
#
# Do the same test as above on a TCP connection
@@ -56,7 +66,6 @@ select 3;
connection wait_con;
flush status; # Reset counters
let $retries=300;
-let $aborted_clients = `SHOW STATUS LIKE 'aborted_clients'`;
set @aborted_clients= 0;
--enable_query_log
@@ -80,6 +89,9 @@ while (!`select @aborted_clients`)
}
}
--enable_query_log
+# The server has disconnected; add a small sleep to make sure
+# the disconnect has reached the client
+sleep 1;
connection con1;
# When the connection is closed in this way, the error code should
diff --git a/mysys/my_append.c b/mysys/my_append.c
index c3549c670c3..6d52c03c6cc 100644
--- a/mysys/my_append.c
+++ b/mysys/my_append.c
@@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#define USES_TYPES /* sys/types is included */
#include "mysys_priv.h"
#include <sys/stat.h>
#include <m_string.h>
diff --git a/mysys/my_clock.c b/mysys/my_clock.c
index 41d659c0ffe..70bb374a749 100644
--- a/mysys/my_clock.c
+++ b/mysys/my_clock.c
@@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#define USES_TYPES
#include "my_global.h"
#if !defined(_MSC_VER) && !defined(__BORLANDC__) && !defined(__NETWARE__)
diff --git a/mysys/my_copy.c b/mysys/my_copy.c
index a8a3a775040..2233c791153 100644
--- a/mysys/my_copy.c
+++ b/mysys/my_copy.c
@@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#define USES_TYPES /* sys/types is included */
#include "mysys_priv.h"
#include <my_dir.h> /* for stat */
#include <m_string.h>
diff --git a/mysys/my_create.c b/mysys/my_create.c
index d27edb31d32..e1e32b50842 100644
--- a/mysys/my_create.c
+++ b/mysys/my_create.c
@@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#define USES_TYPES
#include "mysys_priv.h"
#include <my_dir.h>
#include "mysys_err.h"
diff --git a/mysys/my_dup.c b/mysys/my_dup.c
index 9666f5b1858..1fdb4db7276 100644
--- a/mysys/my_dup.c
+++ b/mysys/my_dup.c
@@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#define USES_TYPES
#include "mysys_priv.h"
#include "mysys_err.h"
#include <my_dir.h>
diff --git a/mysys/my_handler.c b/mysys/my_handler.c
index bfec44d57a4..46144c0dff2 100644
--- a/mysys/my_handler.c
+++ b/mysys/my_handler.c
@@ -15,6 +15,7 @@
Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA */
+#include <my_global.h>
#include "my_handler.h"
int mi_compare_text(CHARSET_INFO *charset_info, uchar *a, uint a_length,
diff --git a/mysys/my_lib.c b/mysys/my_lib.c
index 42c57e9d3c4..76c31a8fbae 100644
--- a/mysys/my_lib.c
+++ b/mysys/my_lib.c
@@ -17,7 +17,6 @@
/* TODO: check for overun of memory for names. */
 /* Convert MSDOS-TIME to standard time_t (still needed?) */
-#define USES_TYPES /* sys/types is included */
#include "mysys_priv.h"
#include <m_string.h>
#include <my_dir.h> /* Structs used by my_dir,includes sys/types */
@@ -495,7 +494,7 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
if (!(MyFlags & MY_DONT_SORT))
qsort((void *) result->dir_entry, result->number_off_files,
sizeof(FILEINFO), (qsort_cmp) comp_names);
- DBUG_PRINT(exit, ("found %d files", result->number_off_files));
+ DBUG_PRINT("exit", ("found %d files", result->number_off_files));
DBUG_RETURN(result);
error:
my_errno=errno;
diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c
index 3fb3866f79c..9dd5530bd28 100644
--- a/mysys/my_malloc.c
+++ b/mysys/my_malloc.c
@@ -83,7 +83,7 @@ char *my_strdup(const char *from, myf my_flags)
}
-char *my_strndup(const byte *from, uint length, myf my_flags)
+char *my_strndup(const char *from, uint length, myf my_flags)
{
gptr ptr;
if ((ptr=my_malloc(length+1,my_flags)) != 0)
diff --git a/mysys/my_open.c b/mysys/my_open.c
index 6041ddde9fc..6e57132ae23 100644
--- a/mysys/my_open.c
+++ b/mysys/my_open.c
@@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#define USES_TYPES
#include "mysys_priv.h"
#include "mysys_err.h"
#include <my_dir.h>
diff --git a/mysys/my_redel.c b/mysys/my_redel.c
index abfe84102ef..7782190ae11 100644
--- a/mysys/my_redel.c
+++ b/mysys/my_redel.c
@@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#define USES_TYPES /* sys/types is included */
#include "mysys_priv.h"
#include <my_dir.h>
#include <m_string.h>
diff --git a/mysys/my_rename.c b/mysys/my_rename.c
index b5d813ad787..9c27238cc72 100644
--- a/mysys/my_rename.c
+++ b/mysys/my_rename.c
@@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#define USES_TYPES
#include "mysys_priv.h"
#include <my_dir.h>
#include "mysys_err.h"
diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c
index e40fd751037..518a6a5fdd0 100644
--- a/mysys/safemalloc.c
+++ b/mysys/safemalloc.c
@@ -525,7 +525,7 @@ char *_my_strdup(const char *from, const char *filename, uint lineno,
} /* _my_strdup */
-char *_my_strndup(const byte *from, uint length,
+char *_my_strndup(const char *from, uint length,
const char *filename, uint lineno,
myf MyFlags)
{
diff --git a/mysys/test_dir.c b/mysys/test_dir.c
index f3d220e942f..c9693ab3c68 100644
--- a/mysys/test_dir.c
+++ b/mysys/test_dir.c
@@ -16,7 +16,6 @@
/* TODO: Test all functions */
-#define USES_TYPES
#include "mysys_priv.h"
#include "my_dir.h"
diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c
index 74d6f7431a8..36cb83ae754 100644
--- a/mysys/thr_lock.c
+++ b/mysys/thr_lock.c
@@ -204,6 +204,8 @@ static void check_locks(THR_LOCK *lock, const char *where,
{
if ((int) data->type == (int) TL_READ_NO_INSERT)
count++;
+ /* Protect against infinite loop. */
+ DBUG_ASSERT(count <= lock->read_no_write_count);
}
if (count != lock->read_no_write_count)
{
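The DBUG_ASSERT added above bounds the read-lock scan so that a corrupted lock queue surfaces as an assertion failure instead of a hang. A minimal sketch of the same defensive pattern (not taken from thr_lock.c, using a simplified list type):

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *next; };

    /* Walk a list whose length is tracked elsewhere; a cycle or a stale
       count trips the assertion instead of looping forever. */
    static size_t count_nodes(const struct node *head, size_t expected_max)
    {
      size_t count= 0;
      const struct node *p;
      for (p= head; p; p= p->next)
      {
        count++;
        assert(count <= expected_max);
      }
      return count;
    }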
diff --git a/netware/BUILD/mwasmnlm b/netware/BUILD/mwasmnlm
index 381f84ec0c8..11fc2bc3842 100755
--- a/netware/BUILD/mwasmnlm
+++ b/netware/BUILD/mwasmnlm
@@ -5,4 +5,7 @@ set -e
args=" $*"
-wine --debugmsg -all -- mwasmnlm $args
+# NOTE: Option 'pipefail' is not standard sh
+set -o pipefail
+wine --debugmsg -all -- mwasmnlm $args | \
+perl -pe 's/\r//g; s/^\e.*\e(\[J|>)?//; s/[^[:print:]]//g'
diff --git a/netware/BUILD/mwccnlm b/netware/BUILD/mwccnlm
index cb2d62fe8cf..e6840e781f8 100755
--- a/netware/BUILD/mwccnlm
+++ b/netware/BUILD/mwccnlm
@@ -7,4 +7,7 @@ set -e
# convert it to "-I../include"
args=" "`echo $* | sed -e 's/-I.\/../-I../g'`
-wine --debugmsg -all -- mwccnlm $args
+# NOTE: Option 'pipefail' is not standard sh
+set -o pipefail
+wine --debugmsg -all -- mwccnlm $args | \
+perl -pe 's/\r//g; s/^\e.*\e(\[J|>)?//; s/[^[:print:]]//g'
diff --git a/netware/BUILD/mwldnlm b/netware/BUILD/mwldnlm
index 28566fc5cb1..cc8c9e63c6e 100755
--- a/netware/BUILD/mwldnlm
+++ b/netware/BUILD/mwldnlm
@@ -5,4 +5,7 @@ set -e
args=" $*"
-wine --debugmsg -all -- mwldnlm $args
+# NOTE: Option 'pipefail' is not standard sh
+set -o pipefail
+wine --debugmsg -all -- mwldnlm $args | \
+perl -pe 's/\r//g; s/^\e.*\e(\[J|>)?//; s/[^[:print:]]//g'
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 7cd89eee952..22c28ae5a74 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -58,7 +58,8 @@ EXTRA_SCRIPTS = make_binary_distribution.sh \
EXTRA_DIST = $(EXTRA_SCRIPTS) \
mysqlaccess.conf \
- mysqlbug
+ mysqlbug \
+ make_win_bin_dist
dist_pkgdata_DATA = fill_help_tables.sql mysql_fix_privilege_tables.sql
diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh
index 1b071a294ed..58799880769 100644
--- a/scripts/make_binary_distribution.sh
+++ b/scripts/make_binary_distribution.sh
@@ -138,7 +138,7 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \
client/mysqlslap$BS \
client/mysqldump$BS client/mysqlimport$BS \
client/mysqltest$BS client/mysqlcheck$BS \
- client/mysqlbinlog$BS \
+ client/mysqlbinlog$BS client/mysql_upgrade$BS \
tests/mysql_client_test$BS \
libmysqld/examples/mysql_client_test_embedded$BS \
libmysqld/examples/mysqltest_embedded$BS \
@@ -181,11 +181,21 @@ if [ $BASE_SYSTEM = "netware" ] ; then
fi
copyfileto $BASE/lib \
- libmysql/.libs/libmysqlclient.a libmysql/.libs/libmysqlclient.so* \
- libmysql/libmysqlclient.* libmysql_r/.libs/libmysqlclient_r.a \
- libmysql_r/.libs/libmysqlclient_r.so* libmysql_r/libmysqlclient_r.* \
+ libmysql/.libs/libmysqlclient.a \
+ libmysql/.libs/libmysqlclient.so* \
+ libmysql/.libs/libmysqlclient.sl* \
+ libmysql/.libs/libmysqlclient*.dylib \
+ libmysql/libmysqlclient.* \
+ libmysql_r/.libs/libmysqlclient_r.a \
+ libmysql_r/.libs/libmysqlclient_r.so* \
+ libmysql_r/.libs/libmysqlclient_r.sl* \
+ libmysql_r/.libs/libmysqlclient_r*.dylib \
+ libmysql_r/libmysqlclient_r.* \
+ libmysqld/.libs/libmysqld.a \
+ libmysqld/.libs/libmysqld.so* \
+ libmysqld/.libs/libmysqld.sl* \
+ libmysqld/.libs/libmysqld*.dylib \
mysys/libmysys.a strings/libmystrings.a dbug/libdbug.a \
- libmysqld/.libs/libmysqld.a libmysqld/.libs/libmysqld.so* \
libmysqld/libmysqld.a netware/libmysql.imp \
zlib/.libs/libz.a
diff --git a/scripts/make_win_bin_dist b/scripts/make_win_bin_dist
new file mode 100755
index 00000000000..cebcccb56f4
--- /dev/null
+++ b/scripts/make_win_bin_dist
@@ -0,0 +1,116 @@
+#! /bin/sh
+
+NOINST_NAME=$1
+
+mkdir $NOINST_NAME
+mkdir $NOINST_NAME/bin
+cp client/release/*.exe $NOINST_NAME/bin/
+cp extra/release/*.exe $NOINST_NAME/bin/
+mv $NOINST_NAME/bin/comp_err.exe $NOINST_NAME/bin/comp-err.exe
+cp storage/myisam/release/*.exe $NOINST_NAME/bin/
+cp server-tools/instance-manager/release/*.exe $NOINST_NAME/bin/
+cp tests/release/*.exe $NOINST_NAME/bin/
+cp libmysql/release/*.exe $NOINST_NAME/bin/
+cp libmysql/release/libmysql.dll $NOINST_NAME/bin/
+
+cp sql/release/mysqld.exe $NOINST_NAME/bin/mysqld.exe
+cp sql/debug/mysqld.exe $NOINST_NAME/bin/mysqld-debug.exe
+# For Pro/Classic builds, do this instead:
+# cp sql/release/mysqld.exe $NOINST_NAME/bin/mysqld-nt.exe
+# cp sql/debug/mysqld.exe $NOINST_NAME/bin/mysqld-debug.exe
+
+cp COPYING EXCEPTIONS-CLIENT $NOINST_NAME/
+cp -dpR win/data $NOINST_NAME/data
+mkdir $NOINST_NAME/Docs
+cp Docs/INSTALL-BINARY Docs/manual.chm ChangeLog COPYING $NOINST_NAME/Docs/
+
+# These will be filled in when we enable embedded.
+mkdir -p $NOINST_NAME/Embedded/DLL/debug $NOINST_NAME/Embedded/DLL/release $NOINST_NAME/Embedded/static/release
+
+mkdir -p $NOINST_NAME/examples/libmysqltest/debug $NOINST_NAME/examples/libmysqltest/release
+cp libmysql/mytest.c libmysql/myTest.vcproj libmysql/release/myTest.exe $NOINST_NAME/examples/libmysqltest/
+cp libmysql/debug/myTest.exe $NOINST_NAME/examples/libmysqltest/debug/
+cp libmysql/release/myTest.exe $NOINST_NAME/examples/libmysqltest/release/
+
+mkdir -p $NOINST_NAME/examples/tests
+cp tests/*.res tests/*.tst tests/*.pl tests/*.c $NOINST_NAME/examples/tests/
+
+mkdir -p $NOINST_NAME/include
+cp include/conf*.h \
+ include/mysql*.h \
+ include/errmsg.h \
+ include/my_alloc.h \
+ include/my_getopt.h \
+ include/my_sys.h \
+ include/my_list.h \
+ include/my_pthread.h \
+ include/my_dbug.h \
+ include/m_string.h \
+ include/m_ctype.h \
+ include/my_global.h \
+ include/typelib.h $NOINST_NAME/include/
+cp libmysql/libmysql.def $NOINST_NAME/include/
+cp libmysqld/libmysqld.def $NOINST_NAME/include/
+
+mkdir -p $NOINST_NAME/lib/debug $NOINST_NAME/lib/opt
+cp libmysql/debug/libmysql.dll \
+ libmysql/debug/libmysql.lib \
+ client/debug/mysqlclient.lib \
+ mysys/debug/mysys.lib \
+ regex/debug/regex.lib \
+ strings/debug/strings.lib \
+ zlib/debug/zlib.lib $NOINST_NAME/lib/debug/
+cp libmysql/release/libmysql.dll \
+ libmysql/release/libmysql.lib \
+ client/release/mysqlclient.lib \
+ regex/release/regex.lib \
+ strings/release/strings.lib \
+ zlib/release/zlib.lib $NOINST_NAME/lib/opt/
+cp mysys/release/mysys.lib $NOINST_NAME/lib/opt/mysys_tls.lib
+
+cp support-files/my-*.ini $NOINST_NAME/
+
+mkdir -p $NOINST_NAME/mysql-test/include $NOINST_NAME/mysql-test/lib \
+ $NOINST_NAME/mysql-test/r $NOINST_NAME/mysql-test/std_data \
+ $NOINST_NAME/mysql-test/t $NOINST_NAME/mysql-test/extra
+cp mysql-test/mysql-test-run.pl $NOINST_NAME/mysql-test/
+cp mysql-test/README $NOINST_NAME/mysql-test/
+cp mysql-test/install_test_db.sh $NOINST_NAME/mysql-test/install_test_db
+cp mysql-test/include/*.inc $NOINST_NAME/mysql-test/include/
+cp mysql-test/lib/*.pl $NOINST_NAME/mysql-test/lib/
+cp mysql-test/lib/*.sql $NOINST_NAME/mysql-test/lib/
+cp mysql-test/r/*.require $NOINST_NAME/mysql-test/r/
+# Need this trick, or we get "argument list too long".
+ABS_DST=`pwd`/$NOINST_NAME
+(cd mysql-test/r/ && cp *.result $ABS_DST/mysql-test/r/)
+cp mysql-test/std_data/* $NOINST_NAME/mysql-test/std_data/
+cp mysql-test/t/*.disabled $NOINST_NAME/mysql-test/t/
+cp mysql-test/t/*.opt $NOINST_NAME/mysql-test/t/
+cp mysql-test/t/*.sh $NOINST_NAME/mysql-test/t/
+cp mysql-test/t/*.slave-mi $NOINST_NAME/mysql-test/t/
+cp mysql-test/t/*.sql $NOINST_NAME/mysql-test/t/
+cp mysql-test/t/*.def $NOINST_NAME/mysql-test/t/
+(cd mysql-test/t/ && cp *.test $ABS_DST/mysql-test/t/)
+cp -dpR mysql-test/extra/* $NOINST_NAME/mysql-test/extra/
+
+# This copies in the unsubstituted scripts (containing @VAR@), but that seems
+# rather better than substituting random Unix paths and architecture names
+# from the Unix bootstrap host. Not sure what the point is of including these
+# shell scripts in the Windows packaging in any case.
+mkdir -p $NOINST_NAME/scripts
+for i in `cd scripts && ls`; do \
+ if echo $i | grep -q '\.sh'; then \
+ cp scripts/$i $NOINST_NAME/scripts/`echo $i | sed -e 's/\.sh$//'`; \
+ else if [ $i = Makefile.am -o $i = Makefile.in -o -e scripts/$i.sh ] ; then \
+ : ; \
+ else \
+ cp scripts/$i $NOINST_NAME/scripts/$i; \
+ fi; fi; \
+done
+
+cp -dpR sql/share $NOINST_NAME/
+cp -dpR sql-bench $NOINST_NAME/
+rm -f $NOINST_NAME/sql-bench/*.sh $NOINST_NAME/sql-bench/Makefile*
+
+zip -r $NOINST_NAME.zip $NOINST_NAME
+rm -Rf $NOINST_NAME
diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh
index 744a4791307..91d53c412f7 100644
--- a/scripts/mysqld_safe.sh
+++ b/scripts/mysqld_safe.sh
@@ -346,10 +346,13 @@ then
ulimit -n $open_files
append_arg_to_args "--open-files-limit=$open_files"
fi
- if test -n "$core_file_size"
- then
- ulimit -c $core_file_size
- fi
+fi
+
+# Try to set the core file size (even if we aren't root) because many systems
+# don't specify a hard limit on core file size.
+if test -n "$core_file_size"
+then
+ ulimit -c $core_file_size
fi
#
diff --git a/server-tools/instance-manager/mysqlmanager.vcproj b/server-tools/instance-manager/mysqlmanager.vcproj
index ef8b2dd017e..bbcb94fa221 100644
--- a/server-tools/instance-manager/mysqlmanager.vcproj
+++ b/server-tools/instance-manager/mysqlmanager.vcproj
@@ -34,7 +34,7 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="wsock32.lib"
- OutputFile="$(OutDir)/mysqlmanager.exe"
+ OutputFile="../../client_debug/mysqlmanager.exe"
LinkIncremental="2"
GenerateDebugInformation="TRUE"
ProgramDatabaseFile="$(OutDir)/mysqlmanager.pdb"
@@ -82,7 +82,7 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="wsock32.lib"
- OutputFile="$(OutDir)/mysqlmanager.exe"
+ OutputFile="../../client_release/mysqlmanager.exe"
LinkIncremental="1"
GenerateDebugInformation="TRUE"
SubSystem="1"
diff --git a/server-tools/instance-manager/parse.h b/server-tools/instance-manager/parse.h
index ae29c7eb64a..fd970f54d29 100644
--- a/server-tools/instance-manager/parse.h
+++ b/server-tools/instance-manager/parse.h
@@ -69,7 +69,7 @@ private:
inline char *Named_value::alloc_str(const LEX_STRING *str)
{
- return my_strndup((const byte *) str->str, str->length, MYF(0));
+ return my_strndup(str->str, str->length, MYF(0));
}
inline char *Named_value::alloc_str(const char *str)
diff --git a/sql-common/client.c b/sql-common/client.c
index 08d87f9d083..feaa4ed0bfd 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -125,6 +125,8 @@ static void mysql_close_free(MYSQL *mysql);
static int wait_for_data(my_socket fd, uint timeout);
#endif
+CHARSET_INFO *default_client_charset_info = &my_charset_latin1;
+
/****************************************************************************
A modified version of connect(). my_connect() allows you to specify
@@ -640,7 +642,8 @@ void free_rows(MYSQL_DATA *cur)
my_bool
cli_advanced_command(MYSQL *mysql, enum enum_server_command command,
const char *header, ulong header_length,
- const char *arg, ulong arg_length, my_bool skip_check)
+ const char *arg, ulong arg_length, my_bool skip_check,
+ MYSQL_STMT *stmt __attribute__((unused)))
{
NET *net= &mysql->net;
my_bool result= 1;
@@ -1426,7 +1429,7 @@ mysql_init(MYSQL *mysql)
bzero((char*) (mysql), sizeof(*(mysql)));
mysql->options.connect_timeout= CONNECT_TIMEOUT;
mysql->last_used_con= mysql->next_slave= mysql->master = mysql;
- mysql->charset=default_charset_info;
+ mysql->charset=default_client_charset_info;
strmov(mysql->net.sqlstate, not_error_sqlstate);
/*
By default, we are a replication pivot. The caller must reset it
@@ -1507,6 +1510,7 @@ mysql_ssl_set(MYSQL *mysql __attribute__((unused)) ,
*/
#ifdef HAVE_OPENSSL
+
static void
mysql_ssl_free(MYSQL *mysql __attribute__((unused)))
{
@@ -1531,6 +1535,7 @@ mysql_ssl_free(MYSQL *mysql __attribute__((unused)))
DBUG_VOID_RETURN;
}
+#endif /* HAVE_OPENSSL */
/*
Return the SSL cipher (if any) used for current
@@ -1546,8 +1551,10 @@ const char * STDCALL
mysql_get_ssl_cipher(MYSQL *mysql)
{
DBUG_ENTER("mysql_get_ssl_cipher");
+#ifdef HAVE_OPENSSL
if (mysql->net.vio && mysql->net.vio->ssl_arg)
DBUG_RETURN(SSL_get_cipher_name((SSL*)mysql->net.vio->ssl_arg));
+#endif /* HAVE_OPENSSL */
DBUG_RETURN(NULL);
}
@@ -1566,6 +1573,9 @@ mysql_get_ssl_cipher(MYSQL *mysql)
1 Failed to validate server
*/
+
+#ifdef HAVE_OPENSSL
+
static int ssl_verify_server_cert(Vio *vio, const char* server_hostname)
{
SSL *ssl;
@@ -1655,7 +1665,80 @@ static MYSQL_METHODS client_methods=
#endif
};
-MYSQL *
+C_MODE_START
+int mysql_init_character_set(MYSQL *mysql)
+{
+ NET *net= &mysql->net;
+ const char *default_collation_name;
+
+ /* Set character set */
+ if (!mysql->options.charset_name)
+ {
+ default_collation_name= MYSQL_DEFAULT_COLLATION_NAME;
+ if (!(mysql->options.charset_name=
+ my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME))))
+ return 1;
+ }
+ else
+ default_collation_name= NULL;
+
+ {
+ const char *save= charsets_dir;
+ if (mysql->options.charset_dir)
+ charsets_dir=mysql->options.charset_dir;
+ mysql->charset=get_charset_by_csname(mysql->options.charset_name,
+ MY_CS_PRIMARY, MYF(MY_WME));
+ if (mysql->charset && default_collation_name)
+ {
+ CHARSET_INFO *collation;
+ if ((collation=
+ get_charset_by_name(default_collation_name, MYF(MY_WME))))
+ {
+ if (!my_charset_same(mysql->charset, collation))
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "COLLATION %s is not valid for CHARACTER SET %s",
+ MYF(0),
+ default_collation_name, mysql->options.charset_name);
+ mysql->charset= NULL;
+ }
+ else
+ {
+ mysql->charset= collation;
+ }
+ }
+ else
+ mysql->charset= NULL;
+ }
+ charsets_dir= save;
+ }
+
+ if (!mysql->charset)
+ {
+ net->last_errno=CR_CANT_READ_CHARSET;
+ strmov(net->sqlstate, unknown_sqlstate);
+ if (mysql->options.charset_dir)
+ my_snprintf(net->last_error, sizeof(net->last_error)-1,
+ ER(net->last_errno),
+ mysql->options.charset_name,
+ mysql->options.charset_dir);
+ else
+ {
+ char cs_dir_name[FN_REFLEN];
+ get_charsets_dir(cs_dir_name);
+ my_snprintf(net->last_error, sizeof(net->last_error)-1,
+ ER(net->last_errno),
+ mysql->options.charset_name,
+ cs_dir_name);
+ }
+ return 1;
+ }
+ return 0;
+}
+C_MODE_END
+
+
+MYSQL * STDCALL
CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user,
const char *passwd, const char *db,
uint port, const char *unix_socket,ulong client_flag)
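The new mysql_init_character_set() above consolidates the client character-set setup (a later hunk replaces the inline code in CLI_MYSQL_REAL_CONNECT() with a call to it), so the same logic can be reused by other connect paths. From an application's point of view nothing changes: the charset is still chosen with mysql_options() before connecting. A minimal sketch, with placeholder connection parameters:

    #include <mysql.h>
    #include <stdio.h>

    int main(void)
    {
      MYSQL mysql;

      mysql_init(&mysql);
      /* Same effect as --default-character-set=utf8 on the command line. */
      mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, "utf8");

      /* "localhost", "user", "pass" and "test" are placeholders. */
      if (!mysql_real_connect(&mysql, "localhost", "user", "pass", "test",
                              0, NULL, 0))
      {
        fprintf(stderr, "connect failed: %s\n", mysql_error(&mysql));
        return 1;
      }
      printf("connection charset: %s\n", mysql_character_set_name(&mysql));
      mysql_close(&mysql);
      return 0;
    }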
@@ -1992,42 +2075,8 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user,
goto error;
}
- /* Set character set */
- if (!mysql->options.charset_name &&
- !(mysql->options.charset_name=
- my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME))))
- goto error;
-
- {
- const char *save= charsets_dir;
- if (mysql->options.charset_dir)
- charsets_dir=mysql->options.charset_dir;
- mysql->charset=get_charset_by_csname(mysql->options.charset_name,
- MY_CS_PRIMARY, MYF(MY_WME));
- charsets_dir= save;
- }
-
- if (!mysql->charset)
- {
- net->last_errno=CR_CANT_READ_CHARSET;
- strmov(net->sqlstate, unknown_sqlstate);
- if (mysql->options.charset_dir)
- my_snprintf(net->last_error, sizeof(net->last_error)-1,
- ER(net->last_errno),
- mysql->options.charset_name,
- mysql->options.charset_dir);
- else
- {
- char cs_dir_name[FN_REFLEN];
- get_charsets_dir(cs_dir_name);
- my_snprintf(net->last_error, sizeof(net->last_error)-1,
- ER(net->last_errno),
- mysql->options.charset_name,
- cs_dir_name);
- }
+ if (mysql_init_character_set(mysql))
goto error;
- }
-
/* Save connection information */
if (!my_multi_malloc(MYF(0),
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 9a97b79813b..95073b95ad6 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -53,7 +53,7 @@ ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc
time.cc tztime.cc uniques.cc unireg.cc item_xmlfunc.cc
rpl_tblmap.cc sql_binlog.cc event_scheduler.cc event_timed.cc
sql_tablespace.cc events.cc ../sql-common/my_user.c
- partition_info.cc rpl_injector.cc
+ partition_info.cc rpl_injector.cc sql_locale.cc
${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
${PROJECT_SOURCE_DIR}/include/mysqld_error.h
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 387f18c2ae9..e453bd6010f 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -80,7 +80,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
mysqld.cc password.c hash_filo.cc hostname.cc \
set_var.cc sql_parse.cc sql_yacc.yy \
sql_base.cc table.cc sql_select.cc sql_insert.cc \
- sql_prepare.cc sql_error.cc \
+ sql_prepare.cc sql_error.cc sql_locale.cc \
sql_update.cc sql_delete.cc uniques.cc sql_do.cc \
procedure.cc item_uniq.cc sql_test.cc \
log.cc log_event.cc init.cc derror.cc sql_acl.cc \
diff --git a/sql/event_timed.cc b/sql/event_timed.cc
index 4ec875f32a3..98369e0e055 100644
--- a/sql/event_timed.cc
+++ b/sql/event_timed.cc
@@ -143,24 +143,13 @@ Event_timed::init_name(THD *thd, sp_name *spn)
MEM_ROOT *root= thd->mem_root;
/* We have to copy strings to get them into the right memroot */
- if (spn)
- {
- dbname.length= spn->m_db.length;
- if (spn->m_db.length == 0)
- dbname.str= NULL;
- else
- dbname.str= strmake_root(root, spn->m_db.str, spn->m_db.length);
- name.length= spn->m_name.length;
- name.str= strmake_root(root, spn->m_name.str, spn->m_name.length);
+ dbname.length= spn->m_db.length;
+ dbname.str= strmake_root(root, spn->m_db.str, spn->m_db.length);
+ name.length= spn->m_name.length;
+ name.str= strmake_root(root, spn->m_name.str, spn->m_name.length);
- if (spn->m_qname.length == 0)
- spn->init_qname(thd);
- }
- else if (thd->db)
- {
- dbname.length= thd->db_length;
- dbname.str= strmake_root(root, thd->db, dbname.length);
- }
+ if (spn->m_qname.length == 0)
+ spn->init_qname(thd);
DBUG_PRINT("dbname", ("len=%d db=%s",dbname.length, dbname.str));
DBUG_PRINT("name", ("len=%d name=%s",name.length, name.str));
diff --git a/sql/events.cc b/sql/events.cc
index d67c42326e3..210cc2c4735 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -598,8 +598,9 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not,
int ret= 0;
CHARSET_INFO *scs= system_charset_info;
TABLE *table;
- char olddb[128];
- bool dbchanged= false;
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
+ bool dbchanged= FALSE;
DBUG_ENTER("db_create_event");
DBUG_PRINT("enter", ("name: %.*s", et->name.length, et->name.str));
@@ -626,8 +627,7 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not,
}
DBUG_PRINT("info", ("non-existant, go forward"));
- if ((ret= sp_use_new_db(thd, et->dbname.str,olddb, sizeof(olddb),0,
- &dbchanged)))
+ if ((ret= sp_use_new_db(thd, et->dbname, &old_db, 0, &dbchanged)))
{
my_error(ER_BAD_DB_ERROR, MYF(0));
goto err;
@@ -691,14 +691,14 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not,
*rows_affected= 1;
ok:
if (dbchanged)
- (void) mysql_change_db(thd, olddb, 1);
+ (void) mysql_change_db(thd, old_db.str, 1);
if (table)
close_thread_tables(thd);
DBUG_RETURN(EVEX_OK);
err:
if (dbchanged)
- (void) mysql_change_db(thd, olddb, 1);
+ (void) mysql_change_db(thd, old_db.str, 1);
if (table)
close_thread_tables(thd);
DBUG_RETURN(EVEX_GENERAL_ERROR);
diff --git a/sql/field.cc b/sql/field.cc
index bb4530dc377..4a9487ff7a9 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1536,7 +1536,8 @@ bool Field::optimize_range(uint idx, uint part)
}
-Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table)
+Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table,
+ bool keep_type __attribute__((unused)))
{
Field *tmp;
if (!(tmp= (Field*) memdup_root(root,(char*) this,size_of())))
@@ -1561,7 +1562,7 @@ Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table,
uint new_null_bit)
{
Field *tmp;
- if ((tmp= new_field(root, new_table)))
+ if ((tmp= new_field(root, new_table, table == new_table)))
{
tmp->ptr= new_ptr;
tmp->null_ptr= new_null_ptr;
@@ -2619,6 +2620,18 @@ void Field_new_decimal::sql_type(String &str) const
}
+uint Field_new_decimal::is_equal(create_field *new_field)
+{
+ return ((new_field->sql_type == real_type()) &&
+ ((new_field->flags & UNSIGNED_FLAG) ==
+ (uint) (flags & UNSIGNED_FLAG)) &&
+ ((new_field->flags & AUTO_INCREMENT_FLAG) ==
+ (uint) (flags & AUTO_INCREMENT_FLAG)) &&
+ (new_field->length == max_length()) &&
+ (new_field->decimals == dec));
+}
+
+
/****************************************************************************
** tiny int
****************************************************************************/
@@ -4572,6 +4585,17 @@ Field_timestamp::Field_timestamp(char *ptr_arg, uint32 len_arg,
}
+Field_timestamp::Field_timestamp(bool maybe_null_arg,
+ const char *field_name_arg,
+ CHARSET_INFO *cs)
+ :Field_str((char*) 0, 19, maybe_null_arg ? (uchar*) "": 0, 0,
+ NONE, field_name_arg, cs)
+{
+ /* For 4.0 MYD and 4.0 InnoDB compatibility */
+ flags|= ZEROFILL_FLAG | UNSIGNED_FLAG;
+}
+
+
/*
Get auto-set type for TIMESTAMP field.
@@ -6371,11 +6395,12 @@ uint Field_string::max_packed_col_length(uint max_length)
}
-Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table)
+Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table,
+ bool keep_type)
{
Field *field;
- if (type() != MYSQL_TYPE_VAR_STRING || table == new_table)
- return Field::new_field(root, new_table);
+ if (type() != MYSQL_TYPE_VAR_STRING || keep_type)
+ return Field::new_field(root, new_table, keep_type);
/*
Old VARCHAR field which should be modified to a VARCHAR on copy
@@ -6384,17 +6409,7 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table)
*/
if ((field= new Field_varstring(field_length, maybe_null(), field_name,
new_table->s, charset())))
- {
field->init(new_table);
- /*
- delayed_insert::get_local_table() needs a ptr copied from old table.
- This is what other new_field() methods do too. The above method of
- Field_varstring sets ptr to NULL.
- */
- field->ptr= ptr;
- field->null_ptr= null_ptr;
- field->null_bit= null_bit;
- }
return field;
}
@@ -6896,9 +6911,11 @@ int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr,
}
-Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table)
+Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table,
+ bool keep_type)
{
- Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table);
+ Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table,
+ keep_type);
if (res)
res->length_bytes= length_bytes;
return res;
diff --git a/sql/field.h b/sql/field.h
index 55f2c037109..3fba0ffbb00 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -219,7 +219,8 @@ public:
*/
virtual bool can_be_compared_as_longlong() const { return FALSE; }
virtual void free() {}
- virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table);
+ virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table,
+ bool keep_type);
virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
@@ -516,6 +517,7 @@ public:
uint32 max_length() { return field_length; }
uint size_of() const { return sizeof(*this); }
uint32 pack_length() const { return (uint32) bin_size; }
+ uint is_equal(create_field *new_field);
};
@@ -792,6 +794,8 @@ public:
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
TABLE_SHARE *share, CHARSET_INFO *cs);
+ Field_timestamp(bool maybe_null_arg, const char *field_name_arg,
+ CHARSET_INFO *cs);
enum_field_types type() const { return FIELD_TYPE_TIMESTAMP;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; }
enum Item_result cmp_type () const { return INT_RESULT; }
@@ -1044,7 +1048,7 @@ public:
enum_field_types real_type() const { return FIELD_TYPE_STRING; }
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
- Field *new_field(MEM_ROOT *root, struct st_table *new_table);
+ Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type);
};
@@ -1117,7 +1121,7 @@ public:
enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; }
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
- Field *new_field(MEM_ROOT *root, struct st_table *new_table);
+ Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type);
Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
@@ -1142,6 +1146,21 @@ public:
{
flags|= BLOB_FLAG;
}
+ Field_blob(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
+ CHARSET_INFO *cs, bool set_packlength)
+ :Field_longstr((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
+ NONE, field_name_arg, cs)
+ {
+ flags|= BLOB_FLAG;
+ packlength= 4;
+ if (set_packlength)
+ {
+ uint32 char_length= len_arg/cs->mbmaxlen;
+ packlength= char_length <= 255 ? 1 :
+ char_length <= 65535 ? 2 :
+ char_length <= 16777215 ? 3 : 4;
+ }
+ }
enum_field_types type() const { return FIELD_TYPE_BLOB;}
enum ha_base_keytype key_type() const
{ return binary() ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; }
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index 3eab782d167..20d1e372a2c 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -674,9 +674,14 @@ void field_conv(Field *to,Field *from)
{ // Be sure the value is stored
Field_blob *blob=(Field_blob*) to;
from->val_str(&blob->value);
- if (!blob->value.is_alloced() &&
- from->real_type() != MYSQL_TYPE_STRING &&
- from->real_type() != MYSQL_TYPE_VARCHAR)
+ /*
+ Copy value if copy_blobs is set, or source is not a string and
+ we have a pointer to its internal string conversion buffer.
+ */
+ if (to->table->copy_blobs ||
+ (!blob->value.is_alloced() &&
+ from->real_type() != MYSQL_TYPE_STRING &&
+ from->real_type() != MYSQL_TYPE_VARCHAR))
blob->value.copy();
blob->store(blob->value.ptr(),blob->value.length(),from->charset());
return;
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index 91111a433dc..7fb5bf12016 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -32,13 +32,14 @@
so to read, that data has to be parsed into fields, to write, fields have to
be stored in this format to write to this data file.
- With MySQL Federated storage engine, there will be no local files for each
- table's data (such as .MYD). A foreign database will store the data that would
- normally be in this file. This will necessitate the use of MySQL client API
- to read, delete, update, insert this data. The data will have to be retrieve
- via an SQL call "SELECT * FROM users". Then, to read this data, it will have
- to be retrieved via mysql_fetch_row one row at a time, then converted from
- the column in this select into the format that the handler expects.
+ With MySQL Federated storage engine, there will be no local files
+ for each table's data (such as .MYD). A foreign database will store
+ the data that would normally be in this file. This will necessitate
+ the use of MySQL client API to read, delete, update, insert this
+ data. The data will have to be retrieved via an SQL call "SELECT *
+ FROM users". Then, to read this data, it will have to be retrieved
+ via mysql_fetch_row one row at a time, then converted from the
+ column in this select into the format that the handler expects.
The create table will simply create the .frm file, and within the
"CREATE TABLE" SQL, there SHALL be any of the following :
@@ -385,8 +386,8 @@ static handler *federated_create_handler(TABLE_SHARE *table,
static byte *federated_get_key(FEDERATED_SHARE *share, uint *length,
my_bool not_used __attribute__ ((unused)))
{
- *length= share->table_name_length;
- return (byte*) share->table_name;
+ *length= share->connect_string_length;
+ return (byte*) share->scheme;
}
/*
@@ -415,7 +416,7 @@ int federated_db_init()
if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST))
goto error;
- if (!hash_init(&federated_open_tables, system_charset_info, 32, 0, 0,
+ if (!hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0,
(hash_get_key) federated_get_key, 0, 0))
{
federated_init= TRUE;
@@ -510,6 +511,7 @@ static int check_foreign_data_source(FEDERATED_SHARE *share,
}
else
{
+ int escaped_table_name_length= 0;
/*
Since we do not support transactions at this version, we can let the
client API silently reconnect. For future versions, we will need more
@@ -528,17 +530,16 @@ static int check_foreign_data_source(FEDERATED_SHARE *share,
query.append(FEDERATED_STAR);
query.append(FEDERATED_FROM);
query.append(FEDERATED_BTICK);
- escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name,
+ escaped_table_name_length=
+ escape_string_for_mysql(&my_charset_bin, (char*)escaped_table_name,
sizeof(escaped_table_name),
share->table_name,
share->table_name_length);
- query.append(escaped_table_name);
+ query.append(escaped_table_name, escaped_table_name_length);
query.append(FEDERATED_BTICK);
query.append(FEDERATED_WHERE);
query.append(FEDERATED_FALSE);
- DBUG_PRINT("info", ("check_foreign_data_source query %s",
- query.c_ptr_quick()));
if (mysql_real_query(mysql, query.ptr(), query.length()))
{
error_code= table_create_flag ?
@@ -629,13 +630,11 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
DBUG_PRINT("info", ("Length: %d", table->s->connect_string.length));
DBUG_PRINT("info", ("String: '%.*s'", table->s->connect_string.length,
table->s->connect_string.str));
- share->scheme= my_strndup((const byte*)table->s->
- connect_string.str,
- table->s->connect_string.length,
- MYF(0));
+ share->scheme= my_strndup(table->s->connect_string.str,
+ table->s->connect_string.length,
+ MYF(0));
- // Add a null for later termination of table name
- share->scheme[table->s->connect_string.length]= 0;
+ share->connect_string_length= table->s->connect_string.length;
DBUG_PRINT("info",("parse_url alloced share->scheme %lx", share->scheme));
/*
@@ -701,7 +700,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
share->table_name++;
share->table_name_length= strlen(share->table_name);
-
+
/* make sure there's not an extra / */
if ((strchr(share->table_name, '/')))
goto error;
@@ -737,8 +736,7 @@ error:
ha_federated::ha_federated(TABLE_SHARE *table_arg)
:handler(&federated_hton, table_arg),
- mysql(0), stored_result(0),
- ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0)
+ mysql(0), stored_result(0)
{
trx_next= 0;
}
@@ -749,8 +747,9 @@ ha_federated::ha_federated(TABLE_SHARE *table_arg)
SYNOPSIS
convert_row_to_internal_format()
- record Byte pointer to record
- row MySQL result set row from fetchrow()
+ record Byte pointer to record
+ row MySQL result set row from fetchrow()
+ result Result set to use
DESCRIPTION
This method simply iterates through a row returned via fetchrow with
@@ -763,15 +762,16 @@ ha_federated::ha_federated(TABLE_SHARE *table_arg)
0 After fields have had field values stored from record
*/
-uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
+uint ha_federated::convert_row_to_internal_format(byte *record,
+ MYSQL_ROW row,
+ MYSQL_RES *result)
{
ulong *lengths;
Field **field;
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
DBUG_ENTER("ha_federated::convert_row_to_internal_format");
- lengths= mysql_fetch_lengths(stored_result);
- memset(record, 0, table->s->null_bytes);
+ lengths= mysql_fetch_lengths(result);
for (field= table->field; *field; field++, row++, lengths++)
{
@@ -1316,12 +1316,11 @@ err:
static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
{
- char *select_query, *tmp_table_name;
+ char *select_query;
char query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- uint tmp_table_name_length;
Field **field;
String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
- FEDERATED_SHARE *share;
+ FEDERATED_SHARE *share= NULL, tmp_share;
/*
 In order to use this string, we must first zero its length,
or it will contain garbage
@@ -1329,12 +1328,15 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
query.length(0);
pthread_mutex_lock(&federated_mutex);
- tmp_table_name= table->s->table_name.str;
- tmp_table_name_length= table->s->table_name.length;
+ if (parse_url(&tmp_share, table, 0))
+ goto error;
+
+ /* TODO: change tmp_share.scheme to LEX_STRING object */
if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables,
- (byte*) table_name,
- tmp_table_name_length)))
+ (byte*) tmp_share.scheme,
+ tmp_share.
+ connect_string_length)))
{
query.set_charset(system_charset_info);
query.append(FEDERATED_SELECT);
@@ -1352,18 +1354,15 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
if (!(share= (FEDERATED_SHARE *)
my_multi_malloc(MYF(MY_WME),
&share, sizeof(*share),
- &tmp_table_name, tmp_table_name_length+ 1,
&select_query,
query.length()+table->s->connect_string.length+1,
NullS)))
- {
- pthread_mutex_unlock(&federated_mutex);
- return NULL;
- }
-
- if (parse_url(share, table, 0))
goto error;
+ memcpy(share, &tmp_share, sizeof(tmp_share));
+
+ share->table_name_length= strlen(share->table_name);
+ /* TODO: share->table_name to LEX_STRING object */
query.append(share->table_name, share->table_name_length);
query.append(FEDERATED_BTICK);
share->select_query= select_query;
@@ -1384,11 +1383,8 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
error:
pthread_mutex_unlock(&federated_mutex);
- if (share->scheme)
- {
- my_free((gptr) share->scheme, MYF(0));
- share->scheme= 0;
- }
+ my_free((gptr) tmp_share.scheme, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((gptr) share, MYF(MY_ALLOW_ZERO_PTR));
return NULL;
}
@@ -1408,13 +1404,7 @@ static int free_share(FEDERATED_SHARE *share)
{
hash_delete(&federated_open_tables, (byte*) share);
my_free((gptr) share->scheme, MYF(MY_ALLOW_ZERO_PTR));
- share->scheme= 0;
- if (share->socket)
- {
- my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR));
- share->socket= 0;
- }
-
+ my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR));
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->mutex));
my_free((gptr) share, MYF(0));
@@ -1476,14 +1466,15 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked)
/* Connect to foreign database mysql_real_connect() */
mysql= mysql_init(0);
- if (!mysql_real_connect(mysql,
- share->hostname,
- share->username,
- share->password,
- share->database,
- share->port,
- share->socket, 0))
+ if (!mysql || !mysql_real_connect(mysql,
+ share->hostname,
+ share->username,
+ share->password,
+ share->database,
+ share->port,
+ share->socket, 0))
{
+ free_share(share);
DBUG_RETURN(stash_remote_error());
}
/*
@@ -1493,6 +1484,11 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked)
*/
mysql->reconnect= 1;
+ ref_length= (table->s->primary_key != MAX_KEY ?
+ table->key_info[table->s->primary_key].key_length :
+ table->s->reclength);
+ DBUG_PRINT("info", ("ref_length: %u", ref_length));
+
DBUG_RETURN(0);
}
@@ -1516,13 +1512,12 @@ int ha_federated::close(void)
/* free the result set */
if (stored_result)
{
- DBUG_PRINT("info",
- ("mysql_free_result result at address %lx", stored_result));
mysql_free_result(stored_result);
stored_result= 0;
}
/* Disconnect from mysql */
- mysql_close(mysql);
+ if (mysql) // QQ is this really needed
+ mysql_close(mysql);
retval= free_share(share);
DBUG_RETURN(retval);
@@ -1688,8 +1683,6 @@ int ha_federated::write_row(byte *buf)
/* add the values */
insert_string.append(values_string);
- DBUG_PRINT("info", ("insert query %s", insert_string.c_ptr_quick()));
-
if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length()))
{
DBUG_RETURN(stash_remote_error());
@@ -1710,14 +1703,15 @@ int ha_federated::write_row(byte *buf)
This method ensures that last_insert_id() works properly. What it simply does
is calls last_insert_id() on the foreign database immediately after insert
(if the table has an auto_increment field) and sets the insert id via
- thd->insert_id(ID) (as well as storing thd->prev_insert_id)
+ thd->insert_id(ID)).
*/
void ha_federated::update_auto_increment(void)
{
THD *thd= current_thd;
DBUG_ENTER("ha_federated::update_auto_increment");
- thd->insert_id(mysql->last_used_con->insert_id);
+ thd->first_successful_insert_id_in_cur_stmt=
+ mysql->last_used_con->insert_id;
DBUG_PRINT("info",("last_insert_id %d", stats.auto_increment_value));
DBUG_VOID_RETURN;
@@ -1765,7 +1759,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
query.append(FEDERATED_EXTENDED);
if (check_opt->sql_flags & TT_USEFRM)
query.append(FEDERATED_USE_FRM);
-
+
if (mysql_real_query(mysql, query.ptr(), query.length()))
{
DBUG_RETURN(stash_remote_error());
@@ -1781,7 +1775,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
it.
Keep in mind that the server can do updates based on ordering if an ORDER BY
- clause was used. Consecutive ordering is not guarenteed.
+ clause was used. Consecutive ordering is not guaranteed.
Currently new_data will not have an updated auto_increament record, or
and updated timestamp field. You can do these for federated by doing these:
if (table->timestamp_on_update_now)
@@ -1807,22 +1801,16 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
this.
*/
bool has_a_primary_key= test(table->s->primary_key != MAX_KEY);
- /*
+ /*
buffers for following strings
*/
- char old_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
- char new_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
+ char field_value_buffer[STRING_BUFFER_USUAL_SIZE];
char update_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char where_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- /* stores the value to be replaced of the field were are updating */
- String old_field_value(old_field_value_buffer,
- sizeof(old_field_value_buffer),
- &my_charset_bin);
- /* stores the new value of the field */
- String new_field_value(new_field_value_buffer,
- sizeof(new_field_value_buffer),
- &my_charset_bin);
+ /* Work area for field values */
+ String field_value(field_value_buffer, sizeof(field_value_buffer),
+ &my_charset_bin);
/* stores the update query */
String update_string(update_buffer,
sizeof(update_buffer),
@@ -1832,11 +1820,10 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
sizeof(where_buffer),
&my_charset_bin);
DBUG_ENTER("ha_federated::update_row");
- /*
+ /*
set string lengths to 0 to avoid misc chars in string
*/
- old_field_value.length(0);
- new_field_value.length(0);
+ field_value.length(0);
update_string.length(0);
where_string.length(0);
@@ -1850,8 +1837,8 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
In this loop, we want to match column names to values being inserted
(while building INSERT statement).
- Iterate through table->field (new data) and share->old_filed (old_data)
- using the same index to created an SQL UPDATE statement, new data is
+ Iterate through table->field (new data) and share->old_field (old_data)
+ using the same index to create an SQL UPDATE statement. New data is
used to create SET field=value and old data is used to create WHERE
field=oldvalue
*/
@@ -1860,21 +1847,22 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
{
if (bitmap_is_set(table->write_set, (*field)->field_index))
{
+ update_string.append((*field)->field_name);
+ update_string.append(FEDERATED_EQ);
+
if ((*field)->is_null())
- new_field_value.append(FEDERATED_NULL);
+ update_string.append(FEDERATED_NULL);
else
{
my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
/* otherwise = */
- (*field)->val_str(&new_field_value);
- (*field)->quote_data(&new_field_value);
+ (*field)->val_str(&field_value);
+ (*field)->quote_data(&field_value);
+ update_string.append(field_value);
+ field_value.length(0);
tmp_restore_column_map(table->read_set, old_map);
}
- update_string.append((*field)->field_name);
- update_string.append(FEDERATED_EQ);
- update_string.append(new_field_value);
update_string.append(FEDERATED_COMMA);
- new_field_value.length(0);
}
if (bitmap_is_set(table->read_set, (*field)->field_index))
@@ -1885,11 +1873,11 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
else
{
where_string.append(FEDERATED_EQ);
- (*field)->val_str(&old_field_value,
+ (*field)->val_str(&field_value,
(char*) (old_data + (*field)->offset()));
- (*field)->quote_data(&old_field_value);
- where_string.append(old_field_value);
- old_field_value.length(0);
+ (*field)->quote_data(&field_value);
+ where_string.append(field_value);
+ field_value.length(0);
}
where_string.append(FEDERATED_AND);
}
@@ -1921,7 +1909,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
/*
This will delete a row. 'buf' will contain a copy of the row to be =deleted.
The server will call this right after the current row has been called (from
- either a previous rnd_nexT() or index call).
+ either a previous rnd_next() or index call).
If you keep a pointer to the last row or can access a primary key it will
make doing the deletion quite a bit easier.
Keep in mind that the server does no guarentee consecutive deletions.
@@ -1987,6 +1975,7 @@ int ha_federated::delete_row(const byte *buf)
DBUG_RETURN(stash_remote_error());
}
stats.deleted+= mysql->affected_rows;
+ stats.records-= mysql->affected_rows;
DBUG_PRINT("info",
("rows deleted %d rows deleted for all time %d",
int(mysql->affected_rows), stats.deleted));
@@ -2003,12 +1992,15 @@ int ha_federated::delete_row(const byte *buf)
*/
int ha_federated::index_read(byte *buf, const byte *key,
- uint key_len, enum ha_rkey_function find_flag)
+ uint key_len, ha_rkey_function find_flag)
{
- int retval;
DBUG_ENTER("ha_federated::index_read");
- retval= index_read_idx(buf, active_index, key, key_len, find_flag);
- DBUG_RETURN(retval);
+
+ if (stored_result)
+ mysql_free_result(stored_result);
+ DBUG_RETURN(index_read_idx_with_result_set(buf, active_index, key,
+ key_len, find_flag,
+ &stored_result));
}
@@ -2017,26 +2009,60 @@ int ha_federated::index_read(byte *buf, const byte *key,
row if any. This is only used to read whole keys.
This method is called via index_read in the case of a WHERE clause using
- a regular non-primary key index, OR is called DIRECTLY when the WHERE clause
+ a primary key index OR is called DIRECTLY when the WHERE clause
uses a PRIMARY KEY index.
+
+ NOTES
+    This uses an internal result set that is deleted before the function
+    returns. It needs to be callable from ha_rnd_pos().
*/
int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
uint key_len, enum ha_rkey_function find_flag)
{
int retval;
+ MYSQL_RES *mysql_result;
+ DBUG_ENTER("ha_federated::index_read_idx");
+
+ if ((retval= index_read_idx_with_result_set(buf, index, key,
+ key_len, find_flag,
+ &mysql_result)))
+ DBUG_RETURN(retval);
+ mysql_free_result(mysql_result);
+ DBUG_RETURN(retval);
+}
+
+
+/*
+ Create result set for rows matching query and return first row
+
+ RESULT
+ 0 ok In this case *result will contain the result set
+ table->status == 0
+ # error In this case *result will contain 0
+ table->status == STATUS_NOT_FOUND
+*/
+
+int ha_federated::index_read_idx_with_result_set(byte *buf, uint index,
+ const byte *key,
+ uint key_len,
+ ha_rkey_function find_flag,
+ MYSQL_RES **result)
+{
+ int retval;
char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char index_value[STRING_BUFFER_USUAL_SIZE];
char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- String index_string(index_value,
+ String index_string(index_value,
sizeof(index_value),
&my_charset_bin);
String sql_query(sql_query_buffer,
sizeof(sql_query_buffer),
&my_charset_bin);
key_range range;
- DBUG_ENTER("ha_federated::index_read_idx");
+ DBUG_ENTER("ha_federated::index_read_idx_with_result_set");
+ *result= 0; // In case of errors
index_string.length(0);
sql_query.length(0);
statistic_increment(table->in_use->status_var.ha_read_key_count,
@@ -2053,20 +2079,6 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
NULL, 0, 0);
sql_query.append(index_string);
- DBUG_PRINT("info",
- ("current key %d key value %s index_string value %s length %d",
- index, (char*) key, index_string.c_ptr_quick(),
- index_string.length()));
-
- DBUG_PRINT("info",
- ("current position %d sql_query %s", current_position,
- sql_query.c_ptr_quick()));
-
- if (stored_result)
- {
- mysql_free_result(stored_result);
- stored_result= 0;
- }
if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
{
my_sprintf(error_buffer, (error_buffer, "error: %d '%s'",
@@ -2074,45 +2086,41 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
goto error;
}
- stored_result= mysql_store_result(mysql);
-
- if (!stored_result)
+ if (!(*result= mysql_store_result(mysql)))
{
retval= HA_ERR_END_OF_FILE;
goto error;
}
- /*
- This basically says that the record in table->record[0] is legal,
- and that it is ok to use this record, for whatever reason, such
- as with a join (without it, joins will not work)
- */
- table->status= 0;
+ if (!(retval= read_next(buf, *result)))
+ DBUG_RETURN(retval);
- retval= rnd_next(buf);
+ mysql_free_result(*result);
+ *result= 0;
+ table->status= STATUS_NOT_FOUND;
DBUG_RETURN(retval);
error:
- if (stored_result)
- {
- mysql_free_result(stored_result);
- stored_result= 0;
- }
table->status= STATUS_NOT_FOUND;
my_error(retval, MYF(0), error_buffer);
DBUG_RETURN(retval);
}
+
/* Initialized at each key walk (called multiple times unlike rnd_init()) */
+
int ha_federated::index_init(uint keynr, bool sorted)
{
DBUG_ENTER("ha_federated::index_init");
- DBUG_PRINT("info",
- ("table: '%s' key: %d", table->s->table_name.str, keynr));
+ DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr));
active_index= keynr;
DBUG_RETURN(0);
}
+/*
+ Read first range
+*/
+
int ha_federated::read_range_first(const key_range *start_key,
const key_range *end_key,
bool eq_range, bool sorted)
@@ -2124,8 +2132,7 @@ int ha_federated::read_range_first(const key_range *start_key,
&my_charset_bin);
DBUG_ENTER("ha_federated::read_range_first");
- if (start_key == NULL && end_key == NULL)
- DBUG_RETURN(0);
+ DBUG_ASSERT(!(start_key == NULL && end_key == NULL));
sql_query.length(0);
sql_query.append(share->select_query);
@@ -2133,6 +2140,11 @@ int ha_federated::read_range_first(const key_range *start_key,
&table->key_info[active_index],
start_key, end_key, 0, eq_range);
+ if (stored_result)
+ {
+ mysql_free_result(stored_result);
+ stored_result= 0;
+ }
if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
{
retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
@@ -2140,38 +2152,21 @@ int ha_federated::read_range_first(const key_range *start_key,
}
sql_query.length(0);
- if (stored_result)
- {
- DBUG_PRINT("info",
- ("mysql_free_result address %lx", stored_result));
- mysql_free_result(stored_result);
- stored_result= 0;
- }
- stored_result= mysql_store_result(mysql);
-
- if (!stored_result)
+ if (!(stored_result= mysql_store_result(mysql)))
{
retval= HA_ERR_END_OF_FILE;
goto error;
}
-
- /* This was successful, please let it be known! */
- table->status= 0;
- retval= rnd_next(table->record[0]);
+ retval= read_next(table->record[0], stored_result);
DBUG_RETURN(retval);
error:
- table->status= STATUS_NOT_FOUND;
- if (stored_result)
- {
- DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result));
- mysql_free_result(stored_result);
- stored_result= 0;
- }
- DBUG_RETURN(retval);
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(retval);
}
+
int ha_federated::read_range_next()
{
int retval;
@@ -2184,13 +2179,13 @@ int ha_federated::read_range_next()
/* Used to read forward through the index. */
int ha_federated::index_next(byte *buf)
{
- int retval;
DBUG_ENTER("ha_federated::index_next");
statistic_increment(table->in_use->status_var.ha_read_next_count,
&LOCK_status);
- retval= rnd_next(buf);
- DBUG_RETURN(retval);
+ DBUG_RETURN(read_next(buf, stored_result));
}
+
+
/*
rnd_init() is called when the system wants the storage engine to do a table
scan.
@@ -2244,11 +2239,8 @@ int ha_federated::rnd_init(bool scan)
if (scan)
{
- DBUG_PRINT("info", ("share->select_query %s", share->select_query));
if (stored_result)
{
- DBUG_PRINT("info",
- ("mysql_free_result address %lx", stored_result));
mysql_free_result(stored_result);
stored_result= 0;
}
@@ -2265,28 +2257,25 @@ int ha_federated::rnd_init(bool scan)
DBUG_RETURN(0);
error:
- DBUG_RETURN(stash_remote_error());
+ DBUG_RETURN(stash_remote_error());
}
+
int ha_federated::rnd_end()
{
- int retval;
DBUG_ENTER("ha_federated::rnd_end");
-
- if (stored_result)
- {
- DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result));
- mysql_free_result(stored_result);
- stored_result= 0;
- }
- retval= index_end();
- DBUG_RETURN(retval);
+ DBUG_RETURN(index_end());
}
int ha_federated::index_end(void)
{
DBUG_ENTER("ha_federated::index_end");
+ if (stored_result)
+ {
+ mysql_free_result(stored_result);
+ stored_result= 0;
+ }
active_index= MAX_KEY;
DBUG_RETURN(0);
}
@@ -2304,8 +2293,6 @@ int ha_federated::index_end(void)
int ha_federated::rnd_next(byte *buf)
{
- int retval;
- MYSQL_ROW row;
DBUG_ENTER("ha_federated::rnd_next");
if (stored_result == 0)
@@ -2313,32 +2300,60 @@ int ha_federated::rnd_next(byte *buf)
/*
Return value of rnd_init is not always checked (see records.cc),
so we can get here _even_ if there is _no_ pre-fetched result-set!
- TODO: fix it.
- */
+ TODO: fix it. We can delete this in 5.1 when rnd_init() is checked.
+ */
DBUG_RETURN(1);
}
-
+ DBUG_RETURN(read_next(buf, stored_result));
+}
+
+
+/*
+ ha_federated::read_next
+
+ reads from a result set and converts to mysql internal
+ format
+
+ SYNOPSIS
+    read_next()
+ buf byte pointer to record
+ result mysql result set
+
+ DESCRIPTION
+ This method is a wrapper method that reads one record from a result
+ set and converts it to the internal table format
+
+ RETURN VALUE
+ 1 error
+ 0 no error
+*/
+
+int ha_federated::read_next(byte *buf, MYSQL_RES *result)
+{
+ int retval;
+ my_ulonglong num_rows;
+ MYSQL_ROW row;
+ DBUG_ENTER("ha_federated::read_next");
+
+ table->status= STATUS_NOT_FOUND; // For easier return
+
/* Fetch a row, insert it back in a row format. */
- current_position= stored_result->data_cursor;
- DBUG_PRINT("info", ("current position %d", current_position));
- if (!(row= mysql_fetch_row(stored_result)))
+ if (!(row= mysql_fetch_row(result)))
DBUG_RETURN(HA_ERR_END_OF_FILE);
- retval= convert_row_to_internal_format(buf, row);
+ if (!(retval= convert_row_to_internal_format(buf, row, result)))
+ table->status= 0;
+
DBUG_RETURN(retval);
}
/*
- 'position()' is called after each call to rnd_next() if the data needs to be
- ordered. You can do something like the following to store the position:
- my_store_ptr(ref, ref_length, current_position);
+ store reference to current row so that we can later find it for
+ a re-read, update or delete.
- The server uses ref to store data. ref_length in the above case is the size
- needed to store current_position. ref is just a byte array that the server
- will maintain. If you are using offsets to mark rows, then current_position
- should be the offset. If it is a primary key like in BDB, then it needs to
- be a primary key.
+ In case of federated, a reference is either a primary key or
+ the whole record.
Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
*/
@@ -2346,32 +2361,44 @@ int ha_federated::rnd_next(byte *buf)
void ha_federated::position(const byte *record)
{
DBUG_ENTER("ha_federated::position");
- /* my_store_ptr Add seek storage */
- *(MYSQL_ROW_OFFSET *) ref= current_position; // ref is always aligned
+ if (table->s->primary_key != MAX_KEY)
+ key_copy(ref, (byte *)record, table->key_info + table->s->primary_key,
+ ref_length);
+ else
+ memcpy(ref, record, ref_length);
DBUG_VOID_RETURN;
}
/*
This is like rnd_next, but you are given a position to use to determine the
- row. The position will be of the type that you stored in ref. You can use
- ha_get_ptr(pos,ref_length) to retrieve whatever key or position you saved
- when position() was called.
+ row. The position will be of the type that you stored in ref.
- This method is required for an ORDER BY.
+ This method is required for an ORDER BY
Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc.
*/
+
int ha_federated::rnd_pos(byte *buf, byte *pos)
{
+ int result;
DBUG_ENTER("ha_federated::rnd_pos");
-
statistic_increment(table->in_use->status_var.ha_read_rnd_count,
&LOCK_status);
- memcpy_fixed(&current_position, pos, sizeof(MYSQL_ROW_OFFSET));
- stored_result->current_row= 0;
- stored_result->data_cursor= current_position;
- DBUG_RETURN(rnd_next(buf));
+ if (table->s->primary_key != MAX_KEY)
+ {
+ /* We have a primary key, so use index_read_idx to find row */
+ result= index_read_idx(buf, table->s->primary_key, pos,
+ ref_length, HA_READ_KEY_EXACT);
+ }
+ else
+ {
+ /* otherwise, get the old record ref as obtained in ::position */
+ memcpy(buf, pos, ref_length);
+ result= 0;
+ }
+ table->status= result ? STATUS_NOT_FOUND : 0;
+ DBUG_RETURN(result);
}
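The position()/rnd_pos() pair above now stores a self-contained row reference (the primary key or the whole record) instead of a MYSQL_ROW_OFFSET into the stored result. A minimal caller-side sketch of the contract this relies on, assuming only the generic handler members ref and ref_length and the position()/rnd_pos() calls; the saved_pos buffer is illustrative only:

    /* During the scan: remember how to find the current row again. */
    file->position(table->record[0]);               /* fills file->ref */
    memcpy(saved_pos, file->ref, file->ref_length); /* saved_pos holds ref_length bytes */

    /* Later, e.g. after filesort has ordered the saved references: */
    file->rnd_pos(table->record[0], saved_pos);     /* re-fetches that row */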
@@ -2464,7 +2491,7 @@ void ha_federated::info(uint flag)
if (flag & HA_STATUS_VARIABLE | HA_STATUS_CONST)
{
- /*
+ /*
deleted is set in ha_federated::info
*/
/*
@@ -2476,11 +2503,13 @@ void ha_federated::info(uint flag)
delete_length = ?
*/
if (row[4] != NULL)
- stats.records= (ha_rows) my_strtoll10(row[4], (char**) 0,
+ stats.records= (ha_rows) my_strtoll10(row[4], (char**) 0,
&error);
if (row[5] != NULL)
- stats.mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0,
- &error);
+ stats.mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error);
+
+ stats.data_file_length= stats.records * stats.mean_rec_length;
+
if (row[12] != NULL)
stats.update_time= (ha_rows) my_strtoll10(row[12], (char**) 0,
&error);
@@ -2488,8 +2517,13 @@ void ha_federated::info(uint flag)
stats.check_time= (ha_rows) my_strtoll10(row[13], (char**) 0,
&error);
}
+ /*
+      Size of IO operations; this is just a reasonable guess, nothing exact.
+ */
if (flag & HA_STATUS_CONST)
stats.block_size= 4096;
+
}
if (result)
@@ -2500,6 +2534,7 @@ void ha_federated::info(uint flag)
error:
if (result)
mysql_free_result(result);
+
my_sprintf(error_buffer, (error_buffer, ": %d : %s",
mysql_errno(mysql), mysql_error(mysql)));
my_error(error_code, MYF(0), error_buffer);
@@ -2580,6 +2615,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type)
{
+ DBUG_ENTER("ha_federated::store_lock");
if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
{
/*
@@ -2609,7 +2645,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd,
*to++= &lock;
- return to;
+ DBUG_RETURN(to);
}
/*
diff --git a/sql/ha_federated.h b/sql/ha_federated.h
index 4a4561ba274..fdb443e74c5 100644
--- a/sql/ha_federated.h
+++ b/sql/ha_federated.h
@@ -130,6 +130,7 @@ typedef struct st_federated_share {
remote host info, parse_url supplies
*/
char *scheme;
+ char *connect_string;
char *hostname;
char *username;
char *password;
@@ -139,7 +140,7 @@ typedef struct st_federated_share {
char *socket;
char *sport;
ushort port;
- uint table_name_length, use_count;
+ uint table_name_length, connect_string_length, use_count;
pthread_mutex_t mutex;
THR_LOCK lock;
} FEDERATED_SHARE;
@@ -153,7 +154,6 @@ class ha_federated: public handler
FEDERATED_SHARE *share; /* Shared lock info */
MYSQL *mysql; /* MySQL connection */
MYSQL_RES *stored_result;
- uint ref_length;
uint fetch_num; // stores the fetch num
MYSQL_ROW_OFFSET current_position; // Current position used by ::position()
int remote_error_number;
@@ -164,7 +164,8 @@ private:
return 0 on success
return errorcode otherwise
*/
- uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row);
+ uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row,
+ MYSQL_RES *result);
bool create_where_from_key(String *to, KEY *key_info,
const key_range *start_key,
const key_range *end_key,
@@ -304,6 +305,13 @@ public:
int connection_rollback();
int connection_autocommit(bool state);
int execute_simple_query(const char *query, int len);
+
+ int read_next(byte *buf, MYSQL_RES *result);
+ int index_read_idx_with_result_set(byte *buf, uint index,
+ const byte *key,
+ uint key_len,
+ ha_rkey_function find_flag,
+ MYSQL_RES **result);
};
int federated_db_init(void);
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 2d097c34f97..2acc1f4ed7d 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -1202,12 +1202,13 @@ int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key,
int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len)
{
+ DBUG_ENTER("ha_myisam::index_read_last");
DBUG_ASSERT(inited==INDEX);
statistic_increment(table->in_use->status_var.ha_read_key_count,
&LOCK_status);
int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST);
table->status=error ? STATUS_NOT_FOUND: 0;
- return error;
+ DBUG_RETURN(error);
}
int ha_myisam::index_next(byte * buf)
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index afeed5f79df..6be714dc2e3 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -555,7 +555,7 @@ static int myisammrg_init()
myisammrg_hton.db_type=DB_TYPE_MRG_MYISAM;
myisammrg_hton.create=myisammrg_create_handler;
myisammrg_hton.panic=myrg_panic;
- myisammrg_hton.flags= HTON_CAN_RECREATE | HTON_ALTER_CANNOT_CREATE;
+ myisammrg_hton.flags= HTON_CAN_RECREATE;
return 0;
}
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index bbeea2ca1ba..64f0cc0b76e 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2473,9 +2473,7 @@ int ha_ndbcluster::write_row(byte *record)
m_skip_auto_increment= FALSE;
update_auto_increment();
- /* Ensure that handler is always called for auto_increment values */
- thd->next_insert_id= 0;
- m_skip_auto_increment= !auto_increment_column_changed;
+ m_skip_auto_increment= (insert_id_for_cur_row == 0);
}
}
@@ -2631,9 +2629,10 @@ int ha_ndbcluster::write_row(byte *record)
{
Ndb *ndb= get_ndb();
Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
+ char buff[22];
DBUG_PRINT("info",
- ("Trying to set next auto increment value to %llu",
- (ulonglong) next_val));
+ ("Trying to set next auto increment value to %s",
+ llstr(next_val, buff)));
Ndb_tuple_id_range_guard g(m_share);
if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
== -1)
@@ -3067,11 +3066,13 @@ void ha_ndbcluster::unpack_record(byte *buf)
// Table with hidden primary key
int hidden_no= table_share->fields;
const NDBTAB *tab= m_table;
+ char buff[22];
const NDBCOL *hidden_col= tab->getColumn(hidden_no);
const NdbRecAttr* rec= m_value[hidden_no].rec;
DBUG_ASSERT(rec);
- DBUG_PRINT("hidden", ("%d: %s \"%llu\"", hidden_no,
- hidden_col->getName(), rec->u_64_value()));
+ DBUG_PRINT("hidden", ("%d: %s \"%s\"", hidden_no,
+ hidden_col->getName(),
+ llstr(rec->u_64_value(), buff)));
}
//DBUG_EXECUTE("value", print_results(););
#endif
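The ha_ndbcluster.cc changes in this patch repeatedly apply one pattern: 64-bit values in DBUG_PRINT are no longer printed with %llu (whose support varies between platform printf implementations) but are first converted with llstr() into a 22-byte stack buffer, large enough for any 64-bit integer plus sign and terminator, and printed with %s. A minimal sketch of the pattern, assuming only llstr() from the MySQL string library; the value is arbitrary:

    char buff[22];                   /* fits -2^63 .. 2^64-1 plus NUL */
    Uint64 value= 42;
    DBUG_PRINT("info", ("value: %s", llstr(value, buff)));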
@@ -3663,20 +3664,11 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
switch (operation) {
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
- if (current_thd->lex->sql_command == SQLCOM_REPLACE && !m_has_unique_index)
- {
- DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
- m_use_write= TRUE;
- } else
- {
- DBUG_PRINT("info", ("Ignoring duplicate key"));
- m_ignore_dup_key= TRUE;
- }
+ DBUG_PRINT("info", ("Ignoring duplicate key"));
+ m_ignore_dup_key= TRUE;
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
- DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
- m_use_write= FALSE;
m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_IGNORE_NO_KEY:
@@ -3689,6 +3681,19 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit"));
m_ignore_no_key= FALSE;
break;
+ case HA_EXTRA_WRITE_CAN_REPLACE:
+ DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE"));
+ if (!m_has_unique_index)
+ {
+ DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
+ m_use_write= TRUE;
+ }
+ break;
+ case HA_EXTRA_WRITE_CANNOT_REPLACE:
+ DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE"));
+ DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
+ m_use_write= FALSE;
+ break;
default:
break;
}
@@ -4502,10 +4507,11 @@ static int create_ndb_column(NDBCOL &col,
// Set autoincrement
if (field->flags & AUTO_INCREMENT_FLAG)
{
+ char buff[22];
col.setAutoIncrement(TRUE);
ulonglong value= info->auto_increment_value ?
info->auto_increment_value : (ulonglong) 1;
- DBUG_PRINT("info", ("Autoincrement key, initial: %llu", value));
+ DBUG_PRINT("info", ("Autoincrement key, initial: %s", llstr(value, buff)));
col.setAutoIncrementInitialValue(value);
}
else
@@ -4781,7 +4787,7 @@ int ha_ndbcluster::create(const char *name,
expect it to be there.
*/
if (!ndbcluster_create_event(ndb, m_table, event_name.c_ptr(), share,
- share && do_event_op /* push warning */))
+ share && do_event_op ? 2 : 1/* push warning */))
{
if (ndb_extra_logging)
sql_print_information("NDB Binlog: CREATE TABLE Event: %s",
@@ -5175,7 +5181,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
const NDBTAB *ndbtab= ndbtab_g2.get_table();
if (!ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share,
- share && ndb_binlog_running /* push warning */))
+ share && ndb_binlog_running ? 2 : 1/* push warning */))
{
if (ndb_extra_logging)
sql_print_information("NDB Binlog: RENAME Event: %s",
@@ -6822,8 +6828,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
if (share->commit_count != 0)
{
*commit_count= share->commit_count;
- DBUG_PRINT("info", ("Getting commit_count: %llu from share",
- share->commit_count));
+ char buff[22];
+ DBUG_PRINT("info", ("Getting commit_count: %s from share",
+ llstr(share->commit_count, buff)));
pthread_mutex_unlock(&share->mutex);
free_share(&share);
DBUG_RETURN(0);
@@ -6851,7 +6858,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
pthread_mutex_lock(&share->mutex);
if (share->commit_count_lock == lock)
{
- DBUG_PRINT("info", ("Setting commit_count to %llu", stat.commit_count));
+ char buff[22];
+ DBUG_PRINT("info", ("Setting commit_count to %s",
+ llstr(stat.commit_count, buff)));
share->commit_count= stat.commit_count;
*commit_count= stat.commit_count;
}
@@ -6901,13 +6910,12 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
char *full_name, uint full_name_len,
ulonglong *engine_data)
{
- DBUG_ENTER("ndbcluster_cache_retrieval_allowed");
-
Uint64 commit_count;
bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
char *dbname= full_name;
char *tabname= dbname+strlen(dbname)+1;
-
+ char buff[22], buff2[22];
+ DBUG_ENTER("ndbcluster_cache_retrieval_allowed");
DBUG_PRINT("enter", ("dbname: %s, tabname: %s, is_autocommit: %d",
dbname, tabname, is_autocommit));
@@ -6923,8 +6931,8 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
DBUG_PRINT("exit", ("No, could not retrieve commit_count"));
DBUG_RETURN(FALSE);
}
- DBUG_PRINT("info", ("*engine_data: %llu, commit_count: %llu",
- *engine_data, commit_count));
+ DBUG_PRINT("info", ("*engine_data: %s, commit_count: %s",
+ llstr(*engine_data, buff), llstr(commit_count, buff2)));
if (commit_count == 0)
{
*engine_data= 0; /* invalidate */
@@ -6938,7 +6946,8 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
DBUG_RETURN(FALSE);
}
- DBUG_PRINT("exit", ("OK to use cache, engine_data: %llu", *engine_data));
+ DBUG_PRINT("exit", ("OK to use cache, engine_data: %s",
+ llstr(*engine_data, buff)));
DBUG_RETURN(TRUE);
}
@@ -6971,10 +6980,10 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
qc_engine_callback *engine_callback,
ulonglong *engine_data)
{
- DBUG_ENTER("ha_ndbcluster::register_query_cache_table");
-
+ Uint64 commit_count;
+ char buff[22];
bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
-
+ DBUG_ENTER("ha_ndbcluster::register_query_cache_table");
DBUG_PRINT("enter",("dbname: %s, tabname: %s, is_autocommit: %d",
m_dbname, m_tabname, is_autocommit));
@@ -6984,7 +6993,6 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
DBUG_RETURN(FALSE);
}
- Uint64 commit_count;
if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count))
{
*engine_data= 0;
@@ -6993,7 +7001,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
}
*engine_data= commit_count;
*engine_callback= ndbcluster_cache_retrieval_allowed;
- DBUG_PRINT("exit", ("commit_count: %llu", commit_count));
+ DBUG_PRINT("exit", ("commit_count: %s", llstr(commit_count, buff)));
DBUG_RETURN(commit_count > 0);
}
@@ -7409,18 +7417,19 @@ int
ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab,
struct Ndb_statistics * ndbstat)
{
- DBUG_ENTER("ndb_get_table_statistics");
- DBUG_PRINT("enter", ("table: %s", ndbtab->getName()));
NdbTransaction* pTrans;
NdbError error;
int retries= 10;
int retry_sleep= 30 * 1000; /* 30 milliseconds */
+ char buff[22], buff2[22], buff3[22], buff4[22];
+ DBUG_ENTER("ndb_get_table_statistics");
+ DBUG_PRINT("enter", ("table: %s", ndbtab->getName()));
DBUG_ASSERT(ndbtab != 0);
do
{
- Uint64 rows, commits, mem;
+ Uint64 rows, commits, fixed_mem, var_mem;
Uint32 size;
Uint32 count= 0;
Uint64 sum_rows= 0;
@@ -7458,7 +7467,10 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab,
pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows);
pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits);
pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size);
- pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem);
+ pOp->getValue(NdbDictionary::Column::FRAGMENT_FIXED_MEMORY,
+ (char*)&fixed_mem);
+ pOp->getValue(NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY,
+ (char*)&var_mem);
if (pTrans->execute(NdbTransaction::NoCommit,
NdbTransaction::AbortOnError,
@@ -7474,7 +7486,7 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab,
sum_commits+= commits;
if (sum_row_size < size)
sum_row_size= size;
- sum_mem+= mem;
+ sum_mem+= fixed_mem + var_mem;
count++;
}
@@ -7493,10 +7505,13 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab,
ndbstat->row_size= sum_row_size;
ndbstat->fragment_memory= sum_mem;
- DBUG_PRINT("exit", ("records: %llu commits: %llu "
- "row_size: %llu mem: %llu count: %u",
- sum_rows, sum_commits, sum_row_size,
- sum_mem, count));
+ DBUG_PRINT("exit", ("records: %s commits: %s "
+ "row_size: %s mem: %s count: %u",
+ llstr(sum_rows, buff),
+ llstr(sum_commits, buff2),
+ llstr(sum_row_size, buff3),
+ llstr(sum_mem, buff4),
+ count));
DBUG_RETURN(0);
retry:
@@ -8148,9 +8163,12 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
if (ndbtab_g.get_table() &&
ndb_get_table_statistics(ndb, ndbtab_g.get_table(), &stat) == 0)
{
+ char buff[22], buff2[22];
DBUG_PRINT("ndb_util_thread",
("Table: %s, commit_count: %llu, rows: %llu",
- share->key, stat.commit_count, stat.row_count));
+ share->key,
+ llstr(stat.commit_count, buff),
+ llstr(stat.row_count, buff2)));
}
else
{
@@ -9866,7 +9884,6 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
}
else
{
-#ifdef NOT_YET
if (!current_thd->variables.new_mode)
{
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
@@ -9875,9 +9892,8 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
ndbcluster_hton_name,
"LIST, RANGE and HASH partition disabled by default,"
" use --new option to enable");
- return HA_ERR_UNSUPPORTED;
+ DBUG_RETURN(HA_ERR_UNSUPPORTED);
}
-#endif
/*
Create a shadow field for those tables that have user defined
partitioning. This field stores the value of the partition
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 5e13a6664f0..a9e33491d07 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -655,6 +655,13 @@ class ha_ndbcluster: public handler
int get_default_no_partitions(HA_CREATE_INFO *info);
bool get_no_parts(const char *name, uint *no_parts);
void set_auto_partitions(partition_info *part_info);
+ virtual bool is_fatal_error(int error, uint flags)
+ {
+ if (!handler::is_fatal_error(error, flags) ||
+ error == HA_ERR_NO_PARTITION_FOUND)
+ return FALSE;
+ return TRUE;
+ }
THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to,
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index b88002b8529..8e9f0077dd0 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -1220,7 +1220,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
type_str= "create table";
break;
case SOT_ALTER_TABLE:
- type_str= "create table";
+ type_str= "alter table";
break;
case SOT_DROP_DB:
type_str= "drop db";
@@ -2500,7 +2500,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
/*
failed, print a warning
*/
- if (push_warning)
+ if (push_warning > 1)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
@@ -2528,7 +2528,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT &&
dict->dropEvent(my_event.getName()))
{
- if (push_warning)
+ if (push_warning > 1)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
@@ -2547,7 +2547,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
*/
if (dict->createEvent(my_event))
{
- if (push_warning)
+ if (push_warning > 1)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
@@ -3442,17 +3442,31 @@ restart:
// wait for the first event
thd->proc_info= "Waiting for first event from ndbcluster";
DBUG_PRINT("info", ("Waiting for the first event"));
- int schema_res= 0;
- Uint64 schema_gci= 0;
- while (schema_res == 0 && !abort_loop)
+ int schema_res, res;
+ Uint64 schema_gci;
+ do
{
+ if (abort_loop)
+ goto err;
schema_res= s_ndb->pollEvents(100, &schema_gci);
+ } while (ndb_latest_received_binlog_epoch == schema_gci);
+ if (ndb_binlog_running)
+ {
+ Uint64 gci= i_ndb->getLatestGCI();
+ while (gci < schema_gci || gci == ndb_latest_received_binlog_epoch)
+ {
+ if (abort_loop)
+ goto err;
+ res= i_ndb->pollEvents(10, &gci);
+ }
+ if (gci > schema_gci)
+ {
+ schema_gci= gci;
+ }
}
// now check that we have epochs consistant with what we had before the restart
DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci));
- if (schema_res > 0)
{
- i_ndb->pollEvents(0);
i_ndb->flushIncompleteEvents(schema_gci);
s_ndb->flushIncompleteEvents(schema_gci);
if (schema_gci < ndb_latest_handled_binlog_epoch)
@@ -3466,6 +3480,17 @@ restart:
ndb_latest_applied_binlog_epoch= 0;
ndb_latest_received_binlog_epoch= 0;
}
+ else if (ndb_latest_applied_binlog_epoch > 0)
+ {
+ sql_print_warning("NDB Binlog: cluster has reconnected. "
+                      "Changes to the database that occurred while "
+ "disconnected will not be in the binlog");
+ }
+ if (ndb_extra_logging)
+ {
+ sql_print_information("NDB Binlog: starting log at epoch %u",
+ (unsigned)schema_gci);
+ }
}
}
{
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 7929257d608..615c4bfb1bf 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -204,6 +204,7 @@ void ha_partition::init_handler_variables()
m_name_buffer_ptr= NULL;
m_engine_array= NULL;
m_file= NULL;
+ m_file_tot_parts= 0;
m_reorged_file= NULL;
m_new_file= NULL;
m_reorged_parts= 0;
@@ -1125,13 +1126,15 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
int ha_partition::prepare_new_partition(TABLE *table,
HA_CREATE_INFO *create_info,
- handler *file, const char *part_name)
+ handler *file, const char *part_name,
+ partition_element *p_elem)
{
int error;
bool create_flag= FALSE;
bool open_flag= FALSE;
DBUG_ENTER("prepare_new_partition");
+ set_up_table_before_create(table, part_name, create_info, 0, p_elem);
if ((error= file->create(part_name, table, create_info)))
goto error;
create_flag= TRUE;
@@ -1231,7 +1234,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
uint no_parts= m_part_info->partitions.elements;
uint no_subparts= m_part_info->no_subparts;
uint i= 0;
- uint no_remain_partitions, part_count;
+ uint no_remain_partitions, part_count, orig_count;
handler **new_file_array;
int error= 1;
bool first;
@@ -1266,10 +1269,10 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
} while (++i < no_parts);
}
if (m_reorged_parts &&
- !(m_reorged_file= (handler**)sql_calloc(sizeof(partition_element*)*
+ !(m_reorged_file= (handler**)sql_calloc(sizeof(handler*)*
(m_reorged_parts + 1))))
{
- mem_alloc_error(sizeof(partition_element*)*(m_reorged_parts+1));
+ mem_alloc_error(sizeof(handler*)*(m_reorged_parts+1));
DBUG_RETURN(ER_OUTOFMEMORY);
}
@@ -1340,6 +1343,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
ones used to be.
*/
first= FALSE;
+ DBUG_ASSERT(i + m_reorged_parts <= m_file_tot_parts);
memcpy((void*)m_reorged_file, &m_file[i*no_subparts],
sizeof(handler*)*m_reorged_parts*no_subparts);
}
@@ -1353,15 +1357,18 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
*/
i= 0;
part_count= 0;
+ orig_count= 0;
part_it.rewind();
do
{
partition_element *part_elem= part_it++;
if (part_elem->part_state == PART_NORMAL)
{
- memcpy((void*)&new_file_array[part_count], (void*)&m_file[i],
+ DBUG_ASSERT(orig_count + no_subparts <= m_file_tot_parts);
+ memcpy((void*)&new_file_array[part_count], (void*)&m_file[orig_count],
sizeof(handler*)*no_subparts);
part_count+= no_subparts;
+ orig_count+= no_subparts;
}
else if (part_elem->part_state == PART_CHANGED ||
part_elem->part_state == PART_TO_BE_ADDED)
@@ -1420,7 +1427,8 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
DBUG_PRINT("info", ("Add subpartition %s", part_name_buff));
if ((error= prepare_new_partition(table, create_info,
new_file_array[part],
- (const char *)part_name_buff)))
+ (const char *)part_name_buff,
+ sub_elem)))
{
cleanup_new_partition(part_count);
DBUG_RETURN(error);
@@ -1436,7 +1444,8 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
DBUG_PRINT("info", ("Add partition %s", part_name_buff));
if ((error= prepare_new_partition(table, create_info,
new_file_array[i],
- (const char *)part_name_buff)))
+ (const char *)part_name_buff,
+ part_elem)))
{
cleanup_new_partition(part_count);
DBUG_RETURN(error);
@@ -1648,7 +1657,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
error= (*file)->delete_table((const char*) from_buff);
else
{
- set_up_table_before_create(table_arg, from_buff, create_info, i);
+ set_up_table_before_create(table_arg, from_buff, create_info, i, NULL);
error= (*file)->create(from_buff, table_arg, create_info);
}
name_buffer_ptr= strend(name_buffer_ptr) + 1;
@@ -1724,12 +1733,15 @@ partition_element *ha_partition::find_partition_element(uint part_id)
void ha_partition::set_up_table_before_create(TABLE *table,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
- uint part_id)
+ uint part_id,
+ partition_element *part_elem)
{
- partition_element *part_elem= find_partition_element(part_id);
-
if (!part_elem)
- return; // Fatal error
+ {
+ part_elem= find_partition_element(part_id);
+ if (!part_elem)
+ return; // Fatal error
+ }
table->s->max_rows= part_elem->part_max_rows;
table->s->min_rows= part_elem->part_min_rows;
const char *partition_name= strrchr(partition_name_with_path, FN_LIBCHAR);
@@ -1959,6 +1971,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root)
if (!(m_file= (handler **) alloc_root(mem_root, alloc_len)))
DBUG_RETURN(TRUE);
+ m_file_tot_parts= m_tot_parts;
bzero((char*) m_file, alloc_len);
for (i= 0; i < m_tot_parts; i++)
{
@@ -2008,6 +2021,7 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root)
mem_alloc_error(alloc_len);
goto error_end;
}
+ m_file_tot_parts= m_tot_parts;
bzero((char*) m_file, alloc_len);
DBUG_ASSERT(m_part_info->no_parts > 0);
@@ -2229,7 +2243,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED);
m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
-
+ key_used_on_scan= m_file[0]->key_used_on_scan;
+ implicit_emptied= m_file[0]->implicit_emptied;
/*
Add 2 bytes for partition id in position ref length.
ref_length=max_in_all_partitions(ref_length) + PARTITION_BYTES_IN_POS
@@ -3265,6 +3280,7 @@ int ha_partition::index_read(byte * buf, const byte * key,
DBUG_ENTER("ha_partition::index_read");
end_range= 0;
+ m_index_scan_type= partition_index_read;
DBUG_RETURN(common_index_read(buf, key, key_len, find_flag));
}
@@ -3282,18 +3298,24 @@ int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len,
enum ha_rkey_function find_flag)
{
int error;
+ bool reverse_order= FALSE;
DBUG_ENTER("ha_partition::common_index_read");
memcpy((void*)m_start_key.key, key, key_len);
m_start_key.length= key_len;
m_start_key.flag= find_flag;
- m_index_scan_type= partition_index_read;
if ((error= partition_scan_set_up(buf, TRUE)))
{
DBUG_RETURN(error);
}
-
+ if (find_flag == HA_READ_PREFIX_LAST ||
+ find_flag == HA_READ_PREFIX_LAST_OR_PREV ||
+ find_flag == HA_READ_BEFORE_KEY)
+ {
+ reverse_order= TRUE;
+ m_ordered_scan_ongoing= TRUE;
+ }
if (!m_ordered_scan_ongoing ||
(find_flag == HA_READ_KEY_EXACT &&
(key_len >= m_curr_key_info->key_length ||
@@ -3319,7 +3341,7 @@ int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len,
In all other cases we will use the ordered index scan. This will use
the partition set created by the get_partition_set method.
*/
- error= handle_ordered_index_scan(buf);
+ error= handle_ordered_index_scan(buf, reverse_order);
}
DBUG_RETURN(error);
}
@@ -3400,9 +3422,10 @@ int ha_partition::common_first_last(byte *buf)
if ((error= partition_scan_set_up(buf, FALSE)))
return error;
- if (!m_ordered_scan_ongoing)
+ if (!m_ordered_scan_ongoing &&
+ m_index_scan_type != partition_index_last)
return handle_unordered_scan_next_partition(buf);
- return handle_ordered_index_scan(buf);
+ return handle_ordered_index_scan(buf, FALSE);
}
@@ -3456,7 +3479,9 @@ int ha_partition::index_read_last(byte *buf, const byte *key, uint keylen)
DBUG_ENTER("ha_partition::index_read_last");
m_ordered= TRUE; // Safety measure
- DBUG_RETURN(index_read(buf, key, keylen, HA_READ_PREFIX_LAST));
+ end_range= 0;
+ m_index_scan_type= partition_index_read_last;
+ DBUG_RETURN(common_index_read(buf, key, keylen, HA_READ_PREFIX_LAST));
}
@@ -3596,6 +3621,7 @@ int ha_partition::read_range_first(const key_range *start_key,
}
else
{
+ m_index_scan_type= partition_index_read;
error= common_index_read(m_rec0,
start_key->key,
start_key->length, start_key->flag);
@@ -3854,12 +3880,11 @@ int ha_partition::handle_unordered_scan_next_partition(byte * buf)
entries.
*/
-int ha_partition::handle_ordered_index_scan(byte *buf)
+int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
{
uint i;
uint j= 0;
bool found= FALSE;
- bool reverse_order= FALSE;
DBUG_ENTER("ha_partition::handle_ordered_index_scan");
m_top_entry= NO_CURRENT_PART_ID;
@@ -3880,7 +3905,6 @@ int ha_partition::handle_ordered_index_scan(byte *buf)
m_start_key.key,
m_start_key.length,
m_start_key.flag);
- reverse_order= FALSE;
break;
case partition_index_first:
error= file->index_first(rec_buf_ptr);
@@ -3890,6 +3914,12 @@ int ha_partition::handle_ordered_index_scan(byte *buf)
error= file->index_last(rec_buf_ptr);
reverse_order= TRUE;
break;
+ case partition_index_read_last:
+ error= file->index_read_last(rec_buf_ptr,
+ m_start_key.key,
+ m_start_key.length);
+ reverse_order= TRUE;
+ break;
default:
DBUG_ASSERT(FALSE);
DBUG_RETURN(HA_ERR_END_OF_FILE);
@@ -4665,6 +4695,27 @@ int ha_partition::extra(enum ha_extra_function operation)
*/
break;
}
+ case HA_EXTRA_WRITE_CAN_REPLACE:
+ case HA_EXTRA_WRITE_CANNOT_REPLACE:
+ {
+ /*
+      Informs the handler that write_row() can replace rows which conflict
+      with the row being inserted by PK/unique key, without reporting an
+      error to the SQL layer.
+
+      This optimization is not safe for partitioned tables in the general
+      case, since we may have to put the new version of the row into a
+      partition different from the one where the old version resides (for
+      example when we partition by a non-PK column, or by some column that
+      is not part of the unique key that was violated).
+      Since NDB, which is at the moment the only engine that supports this
+      optimization, handles partitioning on its own, we simply disable it
+      here. (For NDB this optimization is safe, since it supports only KEY
+      partitioning and won't use this optimization for tables that have
+      additional unique constraints.)
+ */
+ break;
+ }
default:
{
/* Temporary crash to discover what is wrong */
@@ -5262,7 +5313,7 @@ int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
MODULE auto increment
****************************************************************************/
-void ha_partition::restore_auto_increment()
+void ha_partition::restore_auto_increment(ulonglong)
{
DBUG_ENTER("ha_partition::restore_auto_increment");
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index b52c8d92164..c62f21cfaa1 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -46,7 +46,8 @@ private:
partition_index_read= 0,
partition_index_first= 1,
partition_index_last= 2,
- partition_no_index_scan= 3
+ partition_index_read_last= 3,
+ partition_no_index_scan= 4
};
/* Data for the partition handler */
int m_mode; // Open mode
@@ -55,6 +56,7 @@ private:
char *m_name_buffer_ptr; // Pointer to first partition name
handlerton **m_engine_array; // Array of types of the handlers
handler **m_file; // Array of references to handler inst.
+ uint m_file_tot_parts; // Debug
handler **m_new_file; // Array of references to new handlers
handler **m_reorged_file; // Reorganised partitions
handler **m_added_file; // Added parts kept for errors
@@ -202,7 +204,8 @@ private:
int copy_partitions(ulonglong *copied, ulonglong *deleted);
void cleanup_new_partition(uint part_count);
int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info,
- handler *file, const char *part_name);
+ handler *file, const char *part_name,
+ partition_element *p_elem);
/*
delete_table, rename_table and create uses very similar logic which
is packed into this routine.
@@ -222,7 +225,8 @@ private:
void set_up_table_before_create(TABLE *table_arg,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
- uint part_id);
+ uint part_id,
+ partition_element *p_elem);
partition_element *find_partition_element(uint part_id);
public:
@@ -302,6 +306,13 @@ public:
virtual void start_bulk_insert(ha_rows rows);
virtual int end_bulk_insert();
+ virtual bool is_fatal_error(int error, uint flags)
+ {
+ if (!handler::is_fatal_error(error, flags) ||
+ error == HA_ERR_NO_PARTITION_FOUND)
+ return FALSE;
+ return TRUE;
+ }
/*
-------------------------------------------------------------------------
MODULE full table scan
@@ -422,7 +433,7 @@ private:
return (queue_buf(part_id) +
PARTITION_BYTES_IN_POS);
}
- int handle_ordered_index_scan(byte * buf);
+ int handle_ordered_index_scan(byte * buf, bool reverse_order);
int handle_ordered_next(byte * buf, bool next_same);
int handle_ordered_prev(byte * buf);
void return_top_record(byte * buf);
@@ -804,7 +815,7 @@ public:
auto_increment_column_changed
-------------------------------------------------------------------------
*/
- virtual void restore_auto_increment();
+ virtual void restore_auto_increment(ulonglong prev_insert_id);
virtual void get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
ulonglong *first_value,
diff --git a/sql/handler.cc b/sql/handler.cc
index 0dfb31fba8c..b356102a61a 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1514,7 +1514,10 @@ int handler::read_first_row(byte * buf, uint primary_key)
}
/*
- Generate the next auto-increment number based on increment and offset
+ Generate the next auto-increment number based on increment and offset:
+ computes the lowest number
+ - strictly greater than "nr"
+ - of the form: auto_increment_offset + N * auto_increment_increment
In most cases increment= offset= 1, in which case we get:
1,2,3,4,5,...
@@ -1523,8 +1526,10 @@ int handler::read_first_row(byte * buf, uint primary_key)
*/
inline ulonglong
-next_insert_id(ulonglong nr,struct system_variables *variables)
+compute_next_insert_id(ulonglong nr,struct system_variables *variables)
{
+ if (variables->auto_increment_increment == 1)
+ return (nr+1); // optimization of the formula below
nr= (((nr+ variables->auto_increment_increment -
variables->auto_increment_offset)) /
(ulonglong) variables->auto_increment_increment);
@@ -1533,6 +1538,58 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
}
+void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr)
+{
+ /*
+ If we have set THD::next_insert_id previously and plan to insert an
+ explicitely-specified value larger than this, we need to increase
+    explicitly-specified value larger than this, we need to increase
+ */
+ if ((next_insert_id > 0) && (nr >= next_insert_id))
+ set_next_insert_id(compute_next_insert_id(nr, &table->in_use->variables));
+}
+
+
+/*
+ Computes the largest number X:
+ - smaller than or equal to "nr"
+ - of the form: auto_increment_offset + N * auto_increment_increment
+ where N>=0.
+
+ SYNOPSIS
+ prev_insert_id
+ nr Number to "round down"
+ variables variables struct containing auto_increment_increment and
+ auto_increment_offset
+
+ RETURN
+ The number X if it exists, "nr" otherwise.
+*/
+
+inline ulonglong
+prev_insert_id(ulonglong nr, struct system_variables *variables)
+{
+ if (unlikely(nr < variables->auto_increment_offset))
+ {
+ /*
+ There's nothing good we can do here. That is a pathological case, where
+ the offset is larger than the column's max possible value, i.e. not even
+ the first sequence value may be inserted. User will receive warning.
+ */
+ DBUG_PRINT("info",("auto_increment: nr: %lu cannot honour "
+ "auto_increment_offset: %lu",
+ nr, variables->auto_increment_offset));
+ return nr;
+ }
+ if (variables->auto_increment_increment == 1)
+ return nr; // optimization of the formula below
+ nr= (((nr - variables->auto_increment_offset)) /
+ (ulonglong) variables->auto_increment_increment);
+ return (nr * (ulonglong) variables->auto_increment_increment +
+ variables->auto_increment_offset);
+}
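A worked illustration of the two roundings defined above (numbers chosen arbitrarily, a sketch only): with variables->auto_increment_offset= 5 and variables->auto_increment_increment= 10, the allowed values are 5, 15, 25, ...

    compute_next_insert_id(nr, vars):  4 -> 5,   7 -> 15,   15 -> 25,   24 -> 25
    prev_insert_id(nr, vars):          4 -> 4 (nr < offset),   7 -> 5,   15 -> 15,   24 -> 15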
+
+
/*
Update the auto_increment field if necessary
@@ -1546,7 +1603,7 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
IMPLEMENTATION
- Updates columns with type NEXT_NUMBER if:
+ Updates the record's Field of type NEXT_NUMBER if:
- If column value is set to NULL (in which case
auto_increment_field_not_null is 0)
@@ -1554,25 +1611,31 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
set. In the future we will only set NEXT_NUMBER fields if one sets them
to NULL (or they are not included in the insert list).
+ In those cases, we check if the currently reserved interval still has
+ values we have not used. If yes, we pick the smallest one and use it.
+ Otherwise:
- There are two different cases when the above is true:
-
- - thd->next_insert_id == 0 (This is the normal case)
- In this case we set the set the column for the first row to the value
- next_insert_id(get_auto_increment(column))) which is normally
- max-used-column-value +1.
+ - If a list of intervals has been provided to the statement via SET
+ INSERT_ID or via an Intvar_log_event (in a replication slave), we pick the
+      first unused interval from this list and consider it reserved.
- We call get_auto_increment() only for the first row in a multi-row
- statement. For the following rows we generate new numbers based on the
- last used number.
+ - Otherwise we set the column for the first row to the value
+      next_insert_id(get_auto_increment(column)), which is usually
+ max-used-column-value+1.
+ We call get_auto_increment() for the first row in a multi-row
+ statement. get_auto_increment() will tell us the interval of values it
+ reserved for us.
- - thd->next_insert_id != 0. This happens when we have read an Intvar event
- of type INSERT_ID_EVENT from the binary log or when one has used SET
- INSERT_ID=#.
+    - In both cases, for the following rows we use those reserved values without
+      calling the handler again (we just progress in the interval, computing
+      each new value from the previous one) until we have exhausted them; then
+      we either take the next provided interval or call get_auto_increment()
+      again to reserve a new interval.
- In this case we will set the column to the value of next_insert_id.
- The next row will be given the id
- next_insert_id(next_insert_id)
+ - In both cases, the reserved intervals are remembered in
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog if statement-based
+ binlogging; the last reserved interval is remembered in
+ auto_inc_interval_for_cur_row.
The idea is that generated auto_increment values are predictable and
independent of the column values in the table. This is needed to be
@@ -1583,7 +1646,13 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
inserts a column with a higher value than the last used one, we will
start counting from the inserted value.
- thd->next_insert_id is cleared after it's been used for a statement.
+  This function's "outputs" are: the table's auto_increment field is filled
+  with a value; thd->next_insert_id is filled with the value to use for the
+  next row; if a value was autogenerated for the current row, it is stored in
+  thd->insert_id_for_cur_row; if get_auto_increment() was called,
+  thd->auto_inc_interval_for_cur_row is modified; and if that interval is not
+  present in thd->auto_inc_intervals_in_cur_stmt_for_binlog, it is added to
+  this list.
TODO
@@ -1600,7 +1669,8 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
bool handler::update_auto_increment()
{
- ulonglong nr;
+ ulonglong nr, nb_reserved_values;
+ bool append= FALSE;
THD *thd= table->in_use;
struct system_variables *variables= &thd->variables;
bool auto_increment_field_not_null;
@@ -1608,10 +1678,10 @@ bool handler::update_auto_increment()
DBUG_ENTER("handler::update_auto_increment");
/*
- We must save the previous value to be able to restore it if the
- row was not inserted
+    next_insert_id is a "cursor" into the reserved interval: it may move past
+    the interval's maximum, but never below its minimum.
*/
- thd->prev_insert_id= thd->next_insert_id;
+ DBUG_ASSERT(next_insert_id >= auto_inc_interval_for_cur_row.minimum());
auto_increment_field_not_null= table->auto_increment_field_not_null;
table->auto_increment_field_not_null= FALSE; // to reset for next row
@@ -1620,131 +1690,140 @@ bool handler::update_auto_increment()
thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO)
{
/*
- The user did specify a value for the auto_inc column, we don't generate
- a new value, write it down.
- */
- auto_increment_column_changed=0;
-
- /*
Update next_insert_id if we had already generated a value in this
statement (case of INSERT VALUES(null),(3763),(null):
the last NULL needs to insert 3764, not the value of the first NULL plus
1).
*/
- if (thd->clear_next_insert_id && nr >= thd->next_insert_id)
- {
- if (variables->auto_increment_increment != 1)
- nr= next_insert_id(nr, variables);
- else
- nr++;
- thd->next_insert_id= nr;
- DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
- }
+ adjust_next_insert_id_after_explicit_value(nr);
+ insert_id_for_cur_row= 0; // didn't generate anything
DBUG_RETURN(0);
}
- if (!(nr= thd->next_insert_id))
+
+ if ((nr= next_insert_id) >= auto_inc_interval_for_cur_row.maximum())
{
- ulonglong nb_desired_values= 1, nb_reserved_values;
-#ifdef TO_BE_ENABLED_SOON
- /*
- Reserved intervals will be stored in "THD::auto_inc_intervals".
- handler::estimation_rows_to_insert will be the argument passed by
- handler::ha_start_bulk_insert().
- */
- uint estimation_known= test(estimation_rows_to_insert > 0);
- uint nb_already_reserved_intervals= thd->auto_inc_intervals.nb_elements();
- /*
- If an estimation was given to the engine:
- - use it.
- - if we already reserved numbers, it means the estimation was
- not accurate, then we'll reserve 2*AUTO_INC_DEFAULT_NB_VALUES the 2nd
- time, twice that the 3rd time etc.
- If no estimation was given, use those increasing defaults from the
- start, starting from AUTO_INC_DEFAULT_NB_VALUES.
- Don't go beyond a max to not reserve "way too much" (because reservation
- means potentially losing unused values).
- */
- if (nb_already_reserved_intervals == 0 && estimation_known)
- nb_desired_values= estimation_rows_to_insert;
- else /* go with the increasing defaults */
+ /* next_insert_id is beyond what is reserved, so we reserve more. */
+ const Discrete_interval *forced=
+ thd->auto_inc_intervals_forced.get_next();
+ if (forced != NULL)
{
- /* avoid overflow in formula, with this if() */
- if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS)
+ nr= forced->minimum();
+ nb_reserved_values= forced->values();
+ }
+ else
+ {
+ /*
+ handler::estimation_rows_to_insert was set by
+ handler::ha_start_bulk_insert(); if 0 it means "unknown".
+ */
+ uint nb_already_reserved_intervals=
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements();
+ ulonglong nb_desired_values;
+ /*
+ If an estimation was given to the engine:
+ - use it.
+ - if we already reserved numbers, it means the estimation was
+ not accurate; in that case we reserve 2*AUTO_INC_DEFAULT_NB_ROWS the 2nd
+ time, twice that the 3rd time, etc.
+ If no estimation was given, use those increasing defaults from the
+ start, starting from AUTO_INC_DEFAULT_NB_ROWS.
+ Don't go beyond a max to not reserve "way too much" (because
+ reservation means potentially losing unused values).
+ */
+ if (nb_already_reserved_intervals == 0 &&
+ (estimation_rows_to_insert > 0))
+ nb_desired_values= estimation_rows_to_insert;
+ else /* go with the increasing defaults */
{
- nb_desired_values= AUTO_INC_DEFAULT_NB_VALUES *
- (1 << nb_already_reserved_intervals);
- set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX);
+ /* avoid overflow in formula, with this if() */
+ if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS)
+ {
+ nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS *
+ (1 << nb_already_reserved_intervals);
+ set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX);
+ }
+ else
+ nb_desired_values= AUTO_INC_DEFAULT_NB_MAX;
}
- else
- nb_desired_values= AUTO_INC_DEFAULT_NB_MAX;
+ /* This call ignores all its parameters but nr, currently */
+ get_auto_increment(variables->auto_increment_offset,
+ variables->auto_increment_increment,
+ nb_desired_values, &nr,
+ &nb_reserved_values);
+ if (nr == ~(ulonglong) 0)
+ result= 1; // Mark failure
+
+ /*
+ The rounding below should not be needed once all engines actually
+ respect offset and increment in get_auto_increment(), but they don't,
+ so we still do it. It is unclear whether we should also do it for the
+ not-first-in-index case. We hope this rounding did not push us out of
+ the interval; even if it did, we cannot do anything about it (calling
+ the engine again will not help, as we inserted no row).
+ */
+ nr= compute_next_insert_id(nr-1, variables);
+ }
+
+ if (table->s->next_number_key_offset == 0)
+ {
+ /* We must defer the appending until "nr" has possibly been truncated */
+ append= TRUE;
+ }
+ else
+ {
+ /*
+ For such auto_increment there is no notion of interval, just a
+ singleton. The interval is not even stored in
+ thd->auto_inc_interval_for_cur_row, so we are sure to call the engine
+ for the next row.
+ */
+ DBUG_PRINT("info",("auto_increment: special not-first-in-index"));
}
-#endif
- /* This call ignores all its parameters but nr, currently */
- get_auto_increment(variables->auto_increment_offset,
- variables->auto_increment_increment,
- nb_desired_values, &nr,
- &nb_reserved_values);
- if (nr == ~(ulonglong) 0)
- result= 1; // Mark failure
-
- /*
- That should not be needed when engines actually use offset and increment
- above.
- */
- if (variables->auto_increment_increment != 1)
- nr= next_insert_id(nr-1, variables);
- /*
- Update next row based on the found value. This way we don't have to
- call the handler for every generated auto-increment value on a
- multi-row statement
- */
- thd->next_insert_id= nr;
}
DBUG_PRINT("info",("auto_increment: %lu", (ulong) nr));
- /* Mark that we should clear next_insert_id before next stmt */
- thd->clear_next_insert_id= 1;
-
- if (!table->next_number_field->store((longlong) nr, TRUE))
- thd->insert_id((ulonglong) nr);
- else
- thd->insert_id(table->next_number_field->val_int());
-
- /*
- We can't set next_insert_id if the auto-increment key is not the
- first key part, as there is no guarantee that the first parts will be in
- sequence
- */
- if (!table->s->next_number_key_offset)
+ if (unlikely(table->next_number_field->store((longlong) nr, TRUE)))
{
/*
- Set next insert id to point to next auto-increment value to be able to
- handle multi-row statements
- This works even if auto_increment_increment > 1
+ The field refused this value (overflow) and truncated it; use the result
+ of the truncation (which is going to be inserted). However, we try to
+ decrease it to honour the auto_increment_* variables.
+ That will shift the left bound of the reserved interval; we don't
+ bother shifting the right bound (any other value from this
+ interval would cause a duplicate key anyway).
*/
- thd->next_insert_id= next_insert_id(nr, variables);
+ nr= prev_insert_id(table->next_number_field->val_int(), variables);
+ if (unlikely(table->next_number_field->store((longlong) nr, TRUE)))
+ nr= table->next_number_field->val_int();
+ }
+ if (append)
+ {
+ auto_inc_interval_for_cur_row.replace(nr, nb_reserved_values,
+ variables->auto_increment_increment);
+ /* Row-based replication does not need to store intervals in binlog */
+ if (!thd->current_stmt_binlog_row_based)
+ result= result ||
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.append(auto_inc_interval_for_cur_row.minimum(),
+ auto_inc_interval_for_cur_row.values(),
+ variables->auto_increment_increment);
}
- else
- thd->next_insert_id= 0;
-
- /* Mark that we generated a new value */
- auto_increment_column_changed=1;
- DBUG_RETURN(result);
-}
-
-/*
- restore_auto_increment
- In case of error on write, we restore the last used next_insert_id value
- because the previous value was not used.
-*/
+ /*
+ Record this autogenerated value. If the caller then
+ succeeds in inserting this value, it will call
+ record_first_successful_insert_id_in_cur_stmt()
+ which will set first_successful_insert_id_in_cur_stmt if it's not
+ already set.
+ */
+ insert_id_for_cur_row= nr;
+ /*
+ Set next insert id to point to next auto-increment value to be able to
+ handle multi-row statements.
+ */
+ set_next_insert_id(compute_next_insert_id(nr, variables));
-void handler::restore_auto_increment()
-{
- THD *thd= table->in_use;
- if (thd->next_insert_id)
- thd->next_insert_id= thd->prev_insert_id;
+ DBUG_RETURN(result);
}
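Editor's note: a minimal standalone sketch of the reservation scheme implemented
above. The names (Interval, reserve(), engine_max, nb_intervals) are hypothetical
stand-ins for Discrete_interval, get_auto_increment() and the handler state, and
auto_increment_offset/increment handling is left out; it only shows how the first
row forces a reservation, following rows advance the next_insert_id cursor, and an
exhausted interval triggers a new, doubled reservation.

#include <cstdio>

struct Interval                       // stand-in for Discrete_interval
{
  unsigned long long min;
  unsigned long long count;           // number of reserved values
  unsigned long long past_end() const { return min + count; }
};

static unsigned long long engine_max= 0;  // engine's auto_increment high mark
static unsigned nb_intervals= 0;          // intervals reserved so far

// Stand-in for get_auto_increment(): hand out nb consecutive values.
static Interval reserve(unsigned long long nb)
{
  Interval iv= { engine_max + 1, nb };
  engine_max+= nb;
  nb_intervals++;
  return iv;
}

int main()
{
  Interval iv= { 0, 0 };               // empty: row 1 must reserve
  unsigned long long next_id= 0;       // the next_insert_id "cursor"

  for (int row= 0; row < 10; row++)
  {
    if (next_id >= iv.past_end())      // interval exhausted (or none yet)
    {
      iv= reserve(1ULL << nb_intervals);  // 1, 2, 4, 8, ... values
      next_id= iv.min;
    }
    unsigned long long id_for_row= next_id++;
    printf("row %d gets id %llu (interval %llu..%llu)\n",
           row, id_for_row, iv.min, iv.past_end() - 1);
  }
  return 0;
}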
@@ -1840,6 +1919,23 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
}
+void handler::ha_release_auto_increment()
+{
+ release_auto_increment();
+ insert_id_for_cur_row= 0;
+ auto_inc_interval_for_cur_row.replace(0, 0, 0);
+ if (next_insert_id > 0)
+ {
+ next_insert_id= 0;
+ /*
+ This statement used forced auto_increment values if there were any;
+ wipe them away so they do not affect other statements.
+ */
+ table->in_use->auto_inc_intervals_forced.empty();
+ }
+}
+
+
void handler::print_keydup_error(uint key_nr, const char *msg)
{
/* Write the duplicated key in the error message */
@@ -3369,10 +3465,13 @@ namespace
int handler::ha_external_lock(THD *thd, int lock_type)
{
DBUG_ENTER("handler::ha_external_lock");
- int error;
- if (unlikely(error= external_lock(thd, lock_type)))
- DBUG_RETURN(error);
- DBUG_RETURN(0);
+ /*
+ Whether this is a lock or an unlock, this assertion should hold; it verifies
+ that if get_auto_increment() was called (and thus may have reserved intervals
+ or taken a table lock), ha_release_auto_increment() was called as well.
+ */
+ DBUG_ASSERT(next_insert_id == 0);
+ DBUG_RETURN(external_lock(thd, lock_type));
}
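Editor's note: a small standalone sketch of the invariant the new DBUG_ASSERT in
ha_external_lock() checks. ToyHandler and reserve_for_statement() are hypothetical
stand-ins, not the real handler class; the point is only the ordering: whoever
reserves auto_increment values must call ha_release_auto_increment() before the
table is locked or unlocked again, leaving next_insert_id at 0.

#include <cassert>

struct ToyHandler
{
  unsigned long long next_insert_id= 0;

  void reserve_for_statement()          // what update_auto_increment() would do
  { next_insert_id= 42; }

  void ha_release_auto_increment()      // end-of-statement cleanup
  { next_insert_id= 0; }

  void ha_external_lock()               // lock or unlock
  { assert(next_insert_id == 0); }      // the invariant from the patch
};

int main()
{
  ToyHandler h;
  h.ha_external_lock();                 // lock: nothing reserved yet
  h.reserve_for_statement();            // statement reserves values
  h.ha_release_auto_increment();        // statement ends, reservation dropped
  h.ha_external_lock();                 // unlock: assertion still holds
  return 0;
}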
diff --git a/sql/handler.h b/sql/handler.h
index 94f4519a2e7..ddcd6f860a7 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -218,11 +218,6 @@
#define HA_BLOCK_LOCK 256 /* unlock when reading some records */
#define HA_OPEN_TEMPORARY 512
- /* Errors on write which is recoverable (Key exist) */
-#define HA_WRITE_SKIP 121 /* Duplicate key on write */
-#define HA_READ_CHECK 123 /* Update with is recoverable */
-#define HA_CANT_DO_THAT 131 /* Databasehandler can't do it */
-
/* Some key definitions */
#define HA_KEY_NULL_LENGTH 1
#define HA_KEY_BLOB_LENGTH 2
@@ -242,6 +237,11 @@
/* Options of START TRANSACTION statement (and later of SET TRANSACTION stmt) */
#define MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT 1
+/* Flags for method is_fatal_error */
+#define HA_CHECK_DUP_KEY 1
+#define HA_CHECK_DUP_UNIQUE 2
+#define HA_CHECK_DUP (HA_CHECK_DUP_KEY + HA_CHECK_DUP_UNIQUE)
+
enum legacy_db_type
{
DB_TYPE_UNKNOWN=0,DB_TYPE_DIAB_ISAM=1,
@@ -679,7 +679,6 @@ struct handlerton
#define HTON_FLUSH_AFTER_RENAME (1 << 4)
#define HTON_NOT_USER_SELECTABLE (1 << 5)
#define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Having temporary tables not supported
-#define HTON_ALTER_CANNOT_CREATE (1 << 7) //Cannot use alter to create
typedef struct st_thd_trans
{
@@ -725,7 +724,8 @@ typedef struct st_ha_create_information
{
CHARSET_INFO *table_charset, *default_table_charset;
LEX_STRING connect_string;
- const char *comment,*password, *tablespace;
+ const char *password, *tablespace;
+ LEX_STRING comment;
const char *data_file_name, *index_file_name;
const char *alias;
ulonglong max_rows,min_rows;
@@ -906,16 +906,37 @@ public:
uint ref_length;
FT_INFO *ft_handler;
enum {NONE=0, INDEX, RND} inited;
- bool auto_increment_column_changed;
bool implicit_emptied; /* Can be !=0 only if HEAP */
const COND *pushed_cond;
+ /*
+ next_insert_id is the next value which should be inserted into the
+ auto_increment column: in a multi-row inserting statement (like INSERT
+ SELECT), for the first row where the autoinc value is not specified by the
+ statement, get_auto_increment() is called and asked to generate a value;
+ next_insert_id is then set to the next value, and for all other rows
+ next_insert_id is used (and increased each time) without calling
+ get_auto_increment() again.
+ */
+ ulonglong next_insert_id;
+ /*
+ insert id for the current row (*autogenerated*; if not
+ autogenerated, it's 0).
+ At first successful insertion, this variable is stored into
+ THD::first_successful_insert_id_in_cur_stmt.
+ */
+ ulonglong insert_id_for_cur_row;
+ /*
+ Interval returned by get_auto_increment() and being consumed by the
+ inserter.
+ */
+ Discrete_interval auto_inc_interval_for_cur_row;
handler(const handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg),
ref(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
ref_length(sizeof(my_off_t)),
ft_handler(0), inited(NONE), implicit_emptied(0),
- pushed_cond(NULL)
+ pushed_cond(NULL), next_insert_id(0), insert_id_for_cur_row(0)
{}
virtual ~handler(void)
{
@@ -954,6 +975,7 @@ public:
return TRUE;
}
int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
+ void adjust_next_insert_id_after_explicit_value(ulonglong nr);
bool update_auto_increment();
void print_keydup_error(uint key_nr, const char *msg);
virtual void print_error(int error, myf errflag);
@@ -972,7 +994,27 @@ public:
bool has_transactions()
{ return (ha_table_flags() & HA_NO_TRANSACTIONS) == 0; }
virtual uint extra_rec_buf_length() const { return 0; }
-
+
+ /*
+ This method is used to analyse the error to see whether the error
+ is ignorable or not; certain handlers can have more errors that are
+ ignorable than others. E.g. the partition handler can get inserts
+ into a range where there is no partition, and that is an ignorable
+ error.
+ HA_ERR_FOUND_DUP_UNIQUE is a special case in MyISAM that means the
+ same thing as HA_ERR_FOUND_DUP_KEY but can in some cases lead to
+ a slightly different error message.
+ */
+ virtual bool is_fatal_error(int error, uint flags)
+ {
+ if (!error ||
+ ((flags & HA_CHECK_DUP_KEY) &&
+ (error == HA_ERR_FOUND_DUPP_KEY ||
+ error == HA_ERR_FOUND_DUPP_UNIQUE)))
+ return FALSE;
+ return TRUE;
+ }
+
/*
Number of rows in table. It will only be called if
(table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
@@ -1024,7 +1066,7 @@ public:
DBUG_RETURN(rnd_end());
}
int ha_reset();
-
+
/* this is necessary in many places, e.g. in HANDLER command */
int ha_index_or_rnd_end()
{
@@ -1227,9 +1269,30 @@ public:
ulonglong nb_desired_values,
ulonglong *first_value,
ulonglong *nb_reserved_values);
+private:
virtual void release_auto_increment() { return; };
- virtual void restore_auto_increment();
-
+public:
+ void ha_release_auto_increment();
+ void set_next_insert_id(ulonglong id)
+ {
+ DBUG_PRINT("info",("auto_increment: next value %lu", (ulong)id));
+ next_insert_id= id;
+ }
+ void restore_auto_increment(ulonglong prev_insert_id)
+ {
+ /*
+ Insertion of a row failed; re-use the last generated auto_increment
+ id for the next row. This is achieved by resetting next_insert_id to
+ what it was before the failed insertion (that old value is provided by
+ the caller). If that value was 0, this was the first row of the INSERT;
+ then, if insert_id_for_cur_row contains 0, no id was generated
+ for this first row, so no id has been generated since the INSERT started
+ and we should set next_insert_id to 0; if insert_id_for_cur_row is not 0,
+ it is the generated id of the first (and failed) row, so we use it.
+ */
+ next_insert_id= (prev_insert_id > 0) ? prev_insert_id :
+ insert_id_for_cur_row;
+ }
/*
Reset the auto-increment counter to the given value, i.e. the next row
inserted will get the given value. This is called e.g. after TRUNCATE
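Editor's note: a minimal sketch of the restore_auto_increment() decision described
in the comment above, using standalone hypothetical globals instead of the handler
members, with a worked failure scenario.

#include <cassert>

static unsigned long long next_insert_id;
static unsigned long long insert_id_for_cur_row;

static void restore_auto_increment(unsigned long long prev_insert_id)
{
  next_insert_id= (prev_insert_id > 0) ? prev_insert_id
                                       : insert_id_for_cur_row;
}

int main()
{
  // First row of the INSERT: nothing reserved before it (prev == 0),
  // id 5 was generated for it, then the write failed.
  next_insert_id= 6;            // already advanced past the failed row
  insert_id_for_cur_row= 5;
  restore_auto_increment(0);
  assert(next_insert_id == 5);  // the failed row's id will be reused

  // A later row: next_insert_id was 8 before the failed insertion.
  insert_id_for_cur_row= 8;
  restore_auto_increment(8);
  assert(next_insert_id == 8);
  return 0;
}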
diff --git a/sql/item.cc b/sql/item.cc
index 53797052788..1afe25b1990 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -587,6 +587,7 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
}
if (cs->ctype)
{
+ uint orig_len= length;
/*
This will probably need a better implementation in the future:
a function in CHARSET_INFO structure.
@@ -596,6 +597,11 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
length--;
str++;
}
+ if (orig_len != length && !is_autogenerated_name)
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_REMOVED_SPACES, ER(ER_REMOVED_SPACES),
+ str + length - orig_len);
+
}
if (!my_charset_same(cs, system_charset_info))
{
@@ -1343,35 +1349,37 @@ void my_coll_agg_error(DTCollation &c1, DTCollation &c2, DTCollation &c3,
static
-void my_coll_agg_error(Item** args, uint count, const char *fname)
+void my_coll_agg_error(Item** args, uint count, const char *fname,
+ int item_sep)
{
if (count == 2)
- my_coll_agg_error(args[0]->collation, args[1]->collation, fname);
+ my_coll_agg_error(args[0]->collation, args[item_sep]->collation, fname);
else if (count == 3)
- my_coll_agg_error(args[0]->collation, args[1]->collation,
- args[2]->collation, fname);
+ my_coll_agg_error(args[0]->collation, args[item_sep]->collation,
+ args[2*item_sep]->collation, fname);
else
my_error(ER_CANT_AGGREGATE_NCOLLATIONS,MYF(0),fname);
}
bool agg_item_collations(DTCollation &c, const char *fname,
- Item **av, uint count, uint flags)
+ Item **av, uint count, uint flags, int item_sep)
{
uint i;
+ Item **arg;
c.set(av[0]->collation);
- for (i= 1; i < count; i++)
+ for (i= 1, arg= &av[item_sep]; i < count; i++, arg++)
{
- if (c.aggregate(av[i]->collation, flags))
+ if (c.aggregate((*arg)->collation, flags))
{
- my_coll_agg_error(av, count, fname);
+ my_coll_agg_error(av, count, fname, item_sep);
return TRUE;
}
}
if ((flags & MY_COLL_DISALLOW_NONE) &&
c.derivation == DERIVATION_NONE)
{
- my_coll_agg_error(av, count, fname);
+ my_coll_agg_error(av, count, fname, item_sep);
return TRUE;
}
return FALSE;
@@ -1382,7 +1390,7 @@ bool agg_item_collations_for_comparison(DTCollation &c, const char *fname,
Item **av, uint count, uint flags)
{
return (agg_item_collations(c, fname, av, count,
- flags | MY_COLL_DISALLOW_NONE));
+ flags | MY_COLL_DISALLOW_NONE, 1));
}
@@ -1405,16 +1413,26 @@ bool agg_item_collations_for_comparison(DTCollation &c, const char *fname,
For functions with more than two arguments:
collect(A,B,C) ::= collect(collect(A,B),C)
+
+ Since this function calls THD::change_item_tree() on the passed Item **
+ pointers, it is necessary to pass the original Item **'s, not copies.
+ Otherwise their values will not be properly restored (see BUG#20769).
+ If the items are not consecutive (e.g. args[2] and args[5]), use the
+ item_sep argument, i.e.
+
+ agg_item_charsets(coll, fname, &args[2], 2, flags, 3)
+
*/
bool agg_item_charsets(DTCollation &coll, const char *fname,
- Item **args, uint nargs, uint flags)
+ Item **args, uint nargs, uint flags, int item_sep)
{
Item **arg, **last, *safe_args[2];
LINT_INIT(safe_args[0]);
LINT_INIT(safe_args[1]);
- if (agg_item_collations(coll, fname, args, nargs, flags))
+
+ if (agg_item_collations(coll, fname, args, nargs, flags, item_sep))
return TRUE;
/*
@@ -1427,19 +1445,20 @@ bool agg_item_charsets(DTCollation &coll, const char *fname,
if (nargs >=2 && nargs <= 3)
{
safe_args[0]= args[0];
- safe_args[1]= args[1];
+ safe_args[1]= args[item_sep];
}
THD *thd= current_thd;
Query_arena *arena, backup;
bool res= FALSE;
+ uint i;
/*
In case we're in statement prepare, create conversion item
in its memory: it will be reused on each execute.
*/
arena= thd->activate_stmt_arena_if_needed(&backup);
- for (arg= args, last= args + nargs; arg < last; arg++)
+ for (i= 0, arg= args; i < nargs; i++, arg+= item_sep)
{
Item* conv;
uint32 dummy_offset;
@@ -1454,9 +1473,9 @@ bool agg_item_charsets(DTCollation &coll, const char *fname,
{
/* restore the original arguments for better error message */
args[0]= safe_args[0];
- args[1]= safe_args[1];
+ args[item_sep]= safe_args[1];
}
- my_coll_agg_error(args, nargs, fname);
+ my_coll_agg_error(args, nargs, fname, item_sep);
res= TRUE;
break; // we cannot return here, we need to restore "arena".
}
@@ -1488,7 +1507,18 @@ bool agg_item_charsets(DTCollation &coll, const char *fname,
}
-
+void Item_ident_for_show::make_field(Send_field *tmp_field)
+{
+ tmp_field->table_name= tmp_field->org_table_name= table_name;
+ tmp_field->db_name= db_name;
+ tmp_field->col_name= tmp_field->org_col_name= field->field_name;
+ tmp_field->charsetnr= field->charset()->number;
+ tmp_field->length=field->field_length;
+ tmp_field->type=field->type();
+ tmp_field->flags= field->table->maybe_null ?
+ (field->flags & ~NOT_NULL_FLAG) : field->flags;
+ tmp_field->decimals= 0;
+}
/**********************************************/
@@ -3925,7 +3955,9 @@ Field *Item::make_string_field(TABLE *table)
if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB)
field= new Field_blob(max_length, maybe_null, name,
collation.collation);
- else if (max_length > 0)
+ /* Item_type_holder holds the exact type, do not change it */
+ else if (max_length > 0 &&
+ (type() != Item::TYPE_HOLDER || field_type() != MYSQL_TYPE_STRING))
field= new Field_varstring(max_length, maybe_null, name, table->s,
collation.collation);
else
@@ -4005,6 +4037,8 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length)
field= new Field_time(maybe_null, name, &my_charset_bin);
break;
case MYSQL_TYPE_TIMESTAMP:
+ field= new Field_timestamp(maybe_null, name, &my_charset_bin);
+ break;
case MYSQL_TYPE_DATETIME:
field= new Field_datetime(maybe_null, name, &my_charset_bin);
break;
@@ -4038,7 +4072,11 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length)
case MYSQL_TYPE_LONG_BLOB:
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_GEOMETRY:
- field= new Field_blob(max_length, maybe_null, name, collation.collation);
+ if (this->type() == Item::TYPE_HOLDER)
+ field= new Field_blob(max_length, maybe_null, name, collation.collation,
+ 1);
+ else
+ field= new Field_blob(max_length, maybe_null, name, collation.collation);
break; // Blob handled outside of case
}
if (field)
@@ -5436,9 +5474,14 @@ void Item_insert_value::print(String *str)
void Item_trigger_field::setup_field(THD *thd, TABLE *table,
GRANT_INFO *table_grant_info)
{
+ /*
+ It is too early to mark fields as used here: before execution of the
+ statement that will invoke the trigger, other statements may use the same
+ TABLE object, so all such mark-up would be wiped out.
+ Instead we do it in the Table_triggers_list::mark_fields_used()
+ method, which is called during execution of those statements.
+ */
enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
-
- /* TODO: Think more about consequences of this step. */
thd->mark_used_columns= MARK_COLUMNS_NONE;
/*
Try to find field by its name and if it will be found
@@ -6203,7 +6246,7 @@ uint32 Item_type_holder::display_length(Item *item)
case MYSQL_TYPE_DOUBLE:
return 53;
case MYSQL_TYPE_NULL:
- return 4;
+ return 0;
case MYSQL_TYPE_LONGLONG:
return 20;
case MYSQL_TYPE_INT24:
diff --git a/sql/item.h b/sql/item.h
index a6132aba8b0..0e62d2aa9f0 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -737,9 +737,16 @@ public:
Any new item which can be NULL must implement this call.
*/
virtual bool is_null() { return 0; }
+
/*
- it is "top level" item of WHERE clause and we do not need correct NULL
- handling
+ Inform the item that there will be no distinction between its result
+ being FALSE or NULL.
+
+ NOTE
+ This function will be called for e.g. Items that are top-level AND-parts
+ of the WHERE clause. Items implementing this function (currently
+ Item_cond_and and subquery-related items) enable special optimizations
+ when they are "top level".
*/
virtual void top_level_item() {}
/*
@@ -784,6 +791,7 @@ public:
virtual bool collect_item_field_processor(byte * arg) { return 0; }
virtual bool find_item_in_field_list_processor(byte *arg) { return 0; }
virtual bool change_context_processor(byte *context) { return 0; }
+ virtual bool is_expensive_processor(byte *arg) { return 0; }
virtual bool register_field_in_read_map(byte *arg) { return 0; }
/*
Check if a partition function is allowed
@@ -890,13 +898,6 @@ protected:
public:
LEX_STRING m_name;
- /*
- Buffer, pointing to the string value of the item. We need it to
- protect internal buffer from changes. See comment to analogous
- member in Item_param for more details.
- */
- String str_value_ptr;
-
public:
#ifndef DBUG_OFF
/*
@@ -1139,12 +1140,11 @@ public:
};
bool agg_item_collations(DTCollation &c, const char *name,
- Item **items, uint nitems, uint flags= 0);
+ Item **items, uint nitems, uint flags, int item_sep);
bool agg_item_collations_for_comparison(DTCollation &c, const char *name,
- Item **items, uint nitems,
- uint flags= 0);
+ Item **items, uint nitems, uint flags);
bool agg_item_charsets(DTCollation &c, const char *name,
- Item **items, uint nitems, uint flags= 0);
+ Item **items, uint nitems, uint flags, int item_sep);
class Item_num: public Item
@@ -1207,6 +1207,28 @@ public:
bool any_privileges);
};
+
+class Item_ident_for_show :public Item
+{
+public:
+ Field *field;
+ const char *db_name;
+ const char *table_name;
+
+ Item_ident_for_show(Field *par_field, const char *db_arg,
+ const char *table_name_arg)
+ :field(par_field), db_name(db_arg), table_name(table_name_arg)
+ {}
+
+ enum Type type() const { return FIELD_ITEM; }
+ double val_real() { return field->val_real(); }
+ longlong val_int() { return field->val_int(); }
+ String *val_str(String *str) { return field->val_str(str); }
+ my_decimal *val_decimal(my_decimal *dec) { return field->val_decimal(dec); }
+ void make_field(Send_field *tmp_field);
+};
+
+
class Item_equal;
class COND_EQUAL;
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index ce537614386..d3272fae4ed 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -409,7 +409,7 @@ void Item_bool_func2::fix_length_and_dec()
DTCollation coll;
if (args[0]->result_type() == STRING_RESULT &&
args[1]->result_type() == STRING_RESULT &&
- agg_arg_charsets(coll, args, 2, MY_COLL_CMP_CONV))
+ agg_arg_charsets(coll, args, 2, MY_COLL_CMP_CONV, 1))
return;
@@ -1226,7 +1226,7 @@ void Item_func_between::fix_length_and_dec()
agg_cmp_type(thd, &cmp_type, args, 3);
if (cmp_type == STRING_RESULT)
- agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV);
+ agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV, 1);
}
@@ -1346,7 +1346,7 @@ Item_func_ifnull::fix_length_and_dec()
switch (hybrid_type) {
case STRING_RESULT:
- agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV);
+ agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1);
break;
case DECIMAL_RESULT:
case REAL_RESULT:
@@ -1518,7 +1518,7 @@ Item_func_if::fix_length_and_dec()
agg_result_type(&cached_result_type, args+1, 2);
if (cached_result_type == STRING_RESULT)
{
- if (agg_arg_charsets(collation, args+1, 2, MY_COLL_ALLOW_CONV))
+ if (agg_arg_charsets(collation, args+1, 2, MY_COLL_ALLOW_CONV, 1))
return;
}
else
@@ -1599,7 +1599,7 @@ Item_func_nullif::fix_length_and_dec()
unsigned_flag= args[0]->unsigned_flag;
cached_result_type= args[0]->result_type();
if (cached_result_type == STRING_RESULT &&
- agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV))
+ agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1))
return;
}
}
@@ -1891,7 +1891,7 @@ void Item_func_case::fix_length_and_dec()
agg_result_type(&cached_result_type, agg, nagg);
if ((cached_result_type == STRING_RESULT) &&
- agg_arg_charsets(collation, agg, nagg, MY_COLL_ALLOW_CONV))
+ agg_arg_charsets(collation, agg, nagg, MY_COLL_ALLOW_CONV, 1))
return;
@@ -1907,7 +1907,7 @@ void Item_func_case::fix_length_and_dec()
nagg++;
agg_cmp_type(thd, &cmp_type, agg, nagg);
if ((cmp_type == STRING_RESULT) &&
- agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV))
+ agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV, 1))
return;
}
@@ -2037,7 +2037,7 @@ void Item_func_coalesce::fix_length_and_dec()
case STRING_RESULT:
count_only_length();
decimals= NOT_FIXED_DEC;
- agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV);
+ agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1);
break;
case DECIMAL_RESULT:
count_decimal_length();
@@ -2501,7 +2501,7 @@ void Item_func_in::fix_length_and_dec()
agg_cmp_type(thd, &cmp_type, args, arg_count);
if (cmp_type == STRING_RESULT &&
- agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV))
+ agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1))
return;
for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++)
@@ -3234,7 +3234,7 @@ Item_func_regex::fix_fields(THD *thd, Item **ref)
max_length= 1;
decimals= 0;
- if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV))
+ if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1))
return TRUE;
used_tables_cache=args[0]->used_tables() | args[1]->used_tables();
@@ -3318,7 +3318,7 @@ longlong Item_func_regex::val_int()
}
}
null_value=0;
- return my_regexec(&preg,res->c_ptr(),0,(my_regmatch_t*) 0,0) ? 0 : 1;
+ return my_regexec(&preg,res->c_ptr_safe(),0,(my_regmatch_t*) 0,0) ? 0 : 1;
}
@@ -3671,6 +3671,28 @@ Item *Item_cond_or::neg_transformer(THD *thd) /* NOT(a OR b OR ...) -> */
}
+Item *Item_func_nop_all::neg_transformer(THD *thd)
+{
+ /* "NOT (e $cmp$ ANY (SELECT ...)) -> e $rev_cmp$" ALL (SELECT ...) */
+ Item_func_not_all *new_item= new Item_func_not_all(args[0]);
+ Item_allany_subselect *allany= (Item_allany_subselect*)args[0];
+ allany->func= allany->func_creator(FALSE);
+ allany->all= !allany->all;
+ allany->upper_item= new_item;
+ return new_item;
+}
+
+Item *Item_func_not_all::neg_transformer(THD *thd)
+{
+ /* "NOT (e $cmp$ ALL (SELECT ...)) -> e $rev_cmp$" ANY (SELECT ...) */
+ Item_func_nop_all *new_item= new Item_func_nop_all(args[0]);
+ Item_allany_subselect *allany= (Item_allany_subselect*)args[0];
+ allany->all= !allany->all;
+ allany->func= allany->func_creator(TRUE);
+ allany->upper_item= new_item;
+ return new_item;
+}
+
Item *Item_func_eq::negated_item() /* a = b -> a != b */
{
return new Item_func_ne(args[0], args[1]);
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 82cb5febe7d..04462e05e9f 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -313,6 +313,7 @@ public:
void set_sum_test(Item_sum_hybrid *item) { test_sum_item= item; };
void set_sub_test(Item_maxmin_subselect *item) { test_sub_item= item; };
bool empty_underlying_subquery();
+ Item *neg_transformer(THD *thd);
};
@@ -323,6 +324,7 @@ public:
Item_func_nop_all(Item *a) :Item_func_not_all(a) {}
longlong val_int();
const char *func_name() const { return "<nop>"; }
+ Item *neg_transformer(THD *thd);
};
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 6eca6209438..7147142d8a7 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -296,12 +296,6 @@ Item *create_func_pow(Item* a, Item *b)
return new Item_func_pow(a,b);
}
-Item *create_func_current_user()
-{
- current_thd->lex->safe_to_cache_query= 0;
- return new Item_func_user(TRUE);
-}
-
Item *create_func_radians(Item *a)
{
return new Item_func_units((char*) "radians",a,M_PI/180,0.0);
@@ -426,7 +420,9 @@ Item *create_func_unhex(Item* a)
Item *create_func_uuid(void)
{
THD *thd= current_thd;
- thd->lex->binlog_row_based_if_mixed= 1;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ thd->lex->binlog_row_based_if_mixed= TRUE;
+#endif
return new(thd->mem_root) Item_func_uuid();
}
diff --git a/sql/item_create.h b/sql/item_create.h
index c76dc6b9ad7..9b6a74b5bdd 100644
--- a/sql/item_create.h
+++ b/sql/item_create.h
@@ -73,7 +73,6 @@ Item *create_func_period_add(Item* a, Item *b);
Item *create_func_period_diff(Item* a, Item *b);
Item *create_func_pi(void);
Item *create_func_pow(Item* a, Item *b);
-Item *create_func_current_user(void);
Item *create_func_radians(Item *a);
Item *create_func_release_lock(Item* a);
Item *create_func_repeat(Item* a, Item *b);
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 8139ba81777..e901eaf8654 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -402,6 +402,12 @@ Field *Item_func::tmp_table_field(TABLE *table)
}
+bool Item_func::is_expensive_processor(byte *arg)
+{
+ return is_expensive();
+}
+
+
my_decimal *Item_func::val_decimal(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed);
@@ -2036,7 +2042,7 @@ void Item_func_min_max::fix_length_and_dec()
cmp_type=item_cmp_type(cmp_type,args[i]->result_type());
}
if (cmp_type == STRING_RESULT)
- agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV);
+ agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1);
else if ((cmp_type == DECIMAL_RESULT) || (cmp_type == INT_RESULT))
max_length= my_decimal_precision_to_length(max_int_part+decimals, decimals,
unsigned_flag);
@@ -2222,7 +2228,7 @@ longlong Item_func_coercibility::val_int()
void Item_func_locate::fix_length_and_dec()
{
maybe_null=0; max_length=11;
- agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV);
+ agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1);
}
@@ -2339,7 +2345,7 @@ void Item_func_field::fix_length_and_dec()
for (uint i=1; i < arg_count ; i++)
cmp_type= item_cmp_type(cmp_type, args[i]->result_type());
if (cmp_type == STRING_RESULT)
- agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV);
+ agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1);
}
@@ -2406,7 +2412,7 @@ void Item_func_find_in_set::fix_length_and_dec()
}
}
}
- agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV);
+ agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1);
}
static const char separator=',';
@@ -2505,8 +2511,7 @@ void udf_handler::cleanup()
{
if (u_d->func_deinit != NULL)
{
- void (*deinit)(UDF_INIT *) = (void (*)(UDF_INIT*))
- u_d->func_deinit;
+ Udf_func_deinit deinit= u_d->func_deinit;
(*deinit)(&initid);
}
free_udf(u_d);
@@ -2651,9 +2656,7 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func,
}
}
thd->net.last_error[0]=0;
- my_bool (*init)(UDF_INIT *, UDF_ARGS *, char *)=
- (my_bool (*)(UDF_INIT *, UDF_ARGS *, char *))
- u_d->func_init;
+ Udf_func_init init= u_d->func_init;
if ((error=(uchar) init(&initid, &f_args, thd->net.last_error)))
{
my_error(ER_CANT_INITIALIZE_UDF, MYF(0),
@@ -3283,12 +3286,20 @@ longlong Item_func_last_insert_id::val_int()
if (arg_count)
{
longlong value= args[0]->val_int();
- thd->insert_id(value);
null_value= args[0]->null_value;
- return value; // Avoid side effect of insert_id()
+ /*
+ LAST_INSERT_ID(X) must affect the client's mysql_insert_id() as
+ documented in the manual. We don't want to touch
+ first_successful_insert_id_in_cur_stmt because it would make
+ LAST_INSERT_ID(X) take precedence over a generated auto_increment
+ value for this row.
+ */
+ thd->arg_of_last_insert_id_function= TRUE;
+ thd->first_successful_insert_id_in_prev_stmt= value;
+ return value;
}
thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- return thd->last_insert_id_used ? thd->current_insert_id : thd->insert_id();
+ return thd->read_first_successful_insert_id_in_prev_stmt();
}
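Editor's note: a toy model of the two counters the hunk above distinguishes.
ToyTHD and the helper functions are hypothetical; only the two member names come
from the patch. LAST_INSERT_ID(X) overwrites what the next LAST_INSERT_ID() /
mysql_insert_id() call will read, but leaves the per-statement counter alone so a
generated auto_increment value in the same statement is not overridden.

#include <cassert>

struct ToyTHD
{
  unsigned long long first_successful_insert_id_in_prev_stmt= 0;
  unsigned long long first_successful_insert_id_in_cur_stmt= 0;
};

// LAST_INSERT_ID(x)
static unsigned long long last_insert_id_func(ToyTHD &thd, unsigned long long x)
{
  thd.first_successful_insert_id_in_prev_stmt= x;   // what clients will read
  return x;                                         // cur_stmt left untouched
}

// LAST_INSERT_ID() with no argument
static unsigned long long last_insert_id_func(ToyTHD &thd)
{
  return thd.first_successful_insert_id_in_prev_stmt;
}

int main()
{
  ToyTHD thd;
  thd.first_successful_insert_id_in_cur_stmt= 7;    // an autogenerated id
  last_insert_id_func(thd, 100);                    // SELECT LAST_INSERT_ID(100)
  assert(last_insert_id_func(thd) == 100);          // later reads return 100
  assert(thd.first_successful_insert_id_in_cur_stmt == 7);  // untouched
  return 0;
}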
/* This function is just used to test speed of different functions */
@@ -4393,7 +4404,8 @@ bool Item_func_match::fix_fields(THD *thd, Item **ref)
return 1;
}
table->fulltext_searched=1;
- return agg_arg_collations_for_comparison(cmp_collation, args+1, arg_count-1);
+ return agg_arg_collations_for_comparison(cmp_collation,
+ args+1, arg_count-1, 0);
}
bool Item_func_match::fix_index()
diff --git a/sql/item_func.h b/sql/item_func.h
index 0aedae73bdc..3a1952c8d0f 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -55,7 +55,7 @@ public:
NOT_FUNC, NOT_ALL_FUNC,
NOW_FUNC, TRIG_COND_FUNC,
GUSERVAR_FUNC, COLLATE_FUNC,
- EXTRACT_FUNC, CHAR_TYPECAST_FUNC, FUNC_SP };
+ EXTRACT_FUNC, CHAR_TYPECAST_FUNC, FUNC_SP, UDF_FUNC };
enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL,
OPTIMIZE_EQUAL };
enum Type type() const { return FUNC_ITEM; }
@@ -156,7 +156,10 @@ public:
{
return (null_value=args[0]->get_time(ltime));
}
- bool is_null() { (void) val_int(); return null_value; }
+ bool is_null() {
+ (void) val_int(); /* Discard result. It sets null_value as a side effect. */
+ return null_value;
+ }
void signal_divide_by_null();
friend class udf_handler;
Field *tmp_table_field() { return result_field; }
@@ -166,26 +169,28 @@ public:
my_decimal *val_decimal(my_decimal *);
bool agg_arg_collations(DTCollation &c, Item **items, uint nitems,
- uint flags= 0)
+ uint flags)
{
- return agg_item_collations(c, func_name(), items, nitems, flags);
+ return agg_item_collations(c, func_name(), items, nitems, flags, 1);
}
bool agg_arg_collations_for_comparison(DTCollation &c,
Item **items, uint nitems,
- uint flags= 0)
+ uint flags)
{
return agg_item_collations_for_comparison(c, func_name(),
items, nitems, flags);
}
bool agg_arg_charsets(DTCollation &c, Item **items, uint nitems,
- uint flags= 0)
+ uint flags, int item_sep)
{
- return agg_item_charsets(c, func_name(), items, nitems, flags);
+ return agg_item_charsets(c, func_name(), items, nitems, flags, item_sep);
}
bool walk(Item_processor processor, bool walk_subquery, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void traverse_cond(Cond_traverser traverser,
void * arg, traverse_order order);
+ bool is_expensive_processor(byte *arg);
+ virtual bool is_expensive() { return 0; }
};
@@ -959,6 +964,7 @@ public:
Item_udf_func(udf_func *udf_arg, List<Item> &list)
:Item_func(list), udf(udf_arg) {}
const char *func_name() const { return udf.name(); }
+ enum Functype functype() const { return UDF_FUNC; }
bool fix_fields(THD *thd, Item **ref)
{
DBUG_ASSERT(fixed == 0);
@@ -971,6 +977,7 @@ public:
void cleanup();
Item_result result_type () const { return udf.result_type(); }
table_map not_null_tables() const { return 0; }
+ bool is_expensive() { return 1; }
};
@@ -1500,6 +1507,7 @@ public:
virtual enum Functype functype() const { return FUNC_SP; }
bool fix_fields(THD *thd, Item **ref);
+ bool is_expensive() { return 1; }
};
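Editor's note: a minimal sketch, not MySQL's Item hierarchy, of the
processor-callback idea behind walk() and is_expensive_processor() as added above:
a tree walk that stops as soon as one node (e.g. a UDF or stored function, which
report is_expensive() as true) is found. Node and its members are hypothetical.

#include <cassert>
#include <vector>

struct Node
{
  bool expensive;                 // what is_expensive() would return
  std::vector<Node*> args;

  // Counterpart of Item::walk(): apply the processor to this node and all
  // children; any "true" result aborts the walk.
  template <typename Processor>
  bool walk(Processor proc)
  {
    if (proc(*this))
      return true;
    for (Node *child : args)
      if (child->walk(proc))
        return true;
    return false;
  }
};

// Counterpart of is_expensive_processor(): the per-node predicate.
static bool is_expensive_processor(const Node &n) { return n.expensive; }

int main()
{
  Node udf=  { true, {} };          // e.g. a UDF or stored-function call
  Node plus= { false, { &udf } };   // plus(udf(...), ...)
  // The whole expression counts as expensive because one argument is.
  assert(plus.walk(is_expensive_processor));
  return 0;
}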
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index c45fb88a48a..1f64fdba609 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -32,6 +32,7 @@ public:
Item_geometry_func(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {}
Item_geometry_func(List<Item> &list) :Item_str_func(list) {}
void fix_length_and_dec();
+ enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; }
};
class Item_func_geometry_from_text: public Item_geometry_func
@@ -67,6 +68,7 @@ public:
Item_func_as_wkb(Item *a): Item_geometry_func(a) {}
const char *func_name() const { return "aswkb"; }
String *val_str(String *);
+ enum_field_types field_type() const { return MYSQL_TYPE_BLOB; }
};
class Item_func_geometry_type: public Item_str_func
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 4d16e7743b2..dee7f408733 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -405,7 +405,7 @@ void Item_func_concat::fix_length_and_dec()
{
ulonglong max_result_length= 0;
- if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV))
+ if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1))
return;
for (uint i=0 ; i < arg_count ; i++)
@@ -727,7 +727,7 @@ void Item_func_concat_ws::fix_length_and_dec()
{
ulonglong max_result_length;
- if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV))
+ if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1))
return;
/*
@@ -752,44 +752,47 @@ String *Item_func_reverse::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
String *res = args[0]->val_str(str);
- char *ptr,*end;
+ char *ptr, *end, *tmp;
if ((null_value=args[0]->null_value))
return 0;
/* An empty string is a special case as the string pointer may be null */
if (!res->length())
return &my_empty_string;
- res=copy_if_not_alloced(str,res,res->length());
- ptr = (char *) res->ptr();
- end=ptr+res->length();
+ if (tmp_value.alloced_length() < res->length() &&
+ tmp_value.realloc(res->length()))
+ {
+ null_value= 1;
+ return 0;
+ }
+ tmp_value.length(res->length());
+ tmp_value.set_charset(res->charset());
+ ptr= (char *) res->ptr();
+ end= ptr + res->length();
+ tmp= (char *) tmp_value.ptr() + tmp_value.length();
#ifdef USE_MB
if (use_mb(res->charset()))
{
- String tmpstr;
- tmpstr.copy(*res);
- char *tmp = (char *) tmpstr.ptr() + tmpstr.length();
register uint32 l;
while (ptr < end)
{
- if ((l=my_ismbchar(res->charset(), ptr,end)))
- tmp-=l, memcpy(tmp,ptr,l), ptr+=l;
+ if ((l= my_ismbchar(res->charset(),ptr,end)))
+ {
+ tmp-= l;
+ memcpy(tmp,ptr,l);
+ ptr+= l;
+ }
else
- *--tmp=*ptr++;
+ *--tmp= *ptr++;
}
- memcpy((char *) res->ptr(),(char *) tmpstr.ptr(), res->length());
}
else
#endif /* USE_MB */
{
- char tmp;
while (ptr < end)
- {
- tmp=*ptr;
- *ptr++=*--end;
- *end=tmp;
- }
+ *--tmp= *ptr++;
}
- return res;
+ return &tmp_value;
}
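Editor's note: a standalone sketch (not the MySQL implementation) of the idea the
rewritten Item_func_reverse::val_str() applies: reverse into a separate output
buffer, copying multi-byte sequences as whole units so they are not byte-swapped.
The seq_len() helper is a hypothetical stand-in for my_ismbchar() and only
understands UTF-8.

#include <cassert>
#include <cstddef>
#include <cstring>
#include <string>

// Length in bytes of the UTF-8 sequence starting at *p (invalid byte -> 1).
static size_t seq_len(const unsigned char *p)
{
  if (*p < 0x80) return 1;
  if ((*p & 0xE0) == 0xC0) return 2;
  if ((*p & 0xF0) == 0xE0) return 3;
  if ((*p & 0xF8) == 0xF0) return 4;
  return 1;
}

static std::string reverse_mb(const std::string &src)
{
  std::string out(src.size(), '\0');
  const unsigned char *ptr= (const unsigned char*) src.data();
  const unsigned char *end= ptr + src.size();
  char *tmp= &out[0] + out.size();     // write backwards, like the patch does
  while (ptr < end)
  {
    size_t l= seq_len(ptr);
    if (ptr + l > end) l= 1;           // truncated sequence: copy one byte
    tmp-= l;
    memcpy(tmp, ptr, l);
    ptr+= l;
  }
  return out;
}

int main()
{
  assert(reverse_mb("abc") == "cba");
  // "\xC3\xA9" is a two-byte sequence; it must stay intact when reversed.
  assert(reverse_mb("a\xC3\xA9") == "\xC3\xA9" "a");
  return 0;
}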
@@ -937,7 +940,7 @@ void Item_func_replace::fix_length_and_dec()
}
max_length= (ulong) max_result_length;
- if (agg_arg_charsets(collation, args, 3, MY_COLL_CMP_CONV))
+ if (agg_arg_charsets(collation, args, 3, MY_COLL_CMP_CONV, 1))
return;
}
@@ -982,15 +985,11 @@ null:
void Item_func_insert::fix_length_and_dec()
{
- Item *cargs[2];
ulonglong max_result_length;
- cargs[0]= args[0];
- cargs[1]= args[3];
- if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV))
+ // Handle character set for args[0] and args[3].
+ if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 3))
return;
- args[0]= cargs[0];
- args[3]= cargs[1];
max_result_length= ((ulonglong) args[0]->max_length+
(ulonglong) args[3]->max_length);
if (max_result_length >= MAX_BLOB_WIDTH)
@@ -1042,7 +1041,7 @@ String *Item_func_left::val_str(String *str)
long length =(long) args[1]->val_int();
uint char_pos;
- if ((null_value=args[0]->null_value))
+ if ((null_value=(args[0]->null_value || args[1]->null_value)))
return 0;
if (length <= 0)
return &my_empty_string;
@@ -1082,7 +1081,7 @@ String *Item_func_right::val_str(String *str)
String *res =args[0]->val_str(str);
long length =(long) args[1]->val_int();
- if ((null_value=args[0]->null_value))
+ if ((null_value=(args[0]->null_value || args[1]->null_value)))
return 0; /* purecov: inspected */
if (length <= 0)
return &my_empty_string; /* purecov: inspected */
@@ -1161,7 +1160,7 @@ void Item_func_substr_index::fix_length_and_dec()
{
max_length= args[0]->max_length;
- if (agg_arg_charsets(collation, args, 2, MY_COLL_CMP_CONV))
+ if (agg_arg_charsets(collation, args, 2, MY_COLL_CMP_CONV, 1))
return;
}
@@ -1497,16 +1496,30 @@ void Item_func_trim::fix_length_and_dec()
}
else
{
- Item *cargs[2];
- cargs[0]= args[1];
- cargs[1]= args[0];
- if (agg_arg_charsets(collation, cargs, 2, MY_COLL_CMP_CONV))
+ // Handle character set for args[1] and args[0].
+ // Note that we pass args[1] as the first item, and args[0] as the second.
+ if (agg_arg_charsets(collation, &args[1], 2, MY_COLL_CMP_CONV, -1))
return;
- args[0]= cargs[1];
- args[1]= cargs[0];
}
}
+void Item_func_trim::print(String *str)
+{
+ if (arg_count == 1)
+ {
+ Item_func::print(str);
+ return;
+ }
+ str->append(Item_func_trim::func_name());
+ str->append('(');
+ str->append(mode_name());
+ str->append(' ');
+ args[1]->print(str);
+ str->append(STRING_WITH_LEN(" from "));
+ args[0]->print(str);
+ str->append(')');
+}
+
/* Item_func_password */
@@ -1667,52 +1680,61 @@ String *Item_func_database::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
THD *thd= current_thd;
- if (!thd->db)
+ if (thd->db == NULL)
{
null_value= 1;
return 0;
}
else
- str->copy((const char*) thd->db,(uint) strlen(thd->db),system_charset_info);
+ str->copy(thd->db, thd->db_length, system_charset_info);
return str;
}
-// TODO: make USER() replicate properly (currently it is replicated to "")
-String *Item_func_user::val_str(String *str)
+/*
+ TODO: make USER() replicate properly (currently it is replicated to "")
+*/
+bool Item_func_user::init(const char *user, const char *host)
{
DBUG_ASSERT(fixed == 1);
- THD *thd=current_thd;
- CHARSET_INFO *cs= system_charset_info;
- const char *host, *user;
- uint res_length;
- if (is_current)
- {
- user= thd->security_ctx->priv_user;
- host= thd->security_ctx->priv_host;
- }
- else
+ // For system threads (e.g. replication SQL thread) user may be empty
+ if (user)
{
- user= thd->main_security_ctx.user;
- host= thd->main_security_ctx.host_or_ip;
- }
+ CHARSET_INFO *cs= str_value.charset();
+ uint res_length= (strlen(user)+strlen(host)+2) * cs->mbmaxlen;
- // For system threads (e.g. replication SQL thread) user may be empty
- if (!user)
- return &my_empty_string;
- res_length= (strlen(user)+strlen(host)+2) * cs->mbmaxlen;
+ if (str_value.alloc(res_length))
+ {
+ null_value=1;
+ return TRUE;
+ }
- if (str->alloc(res_length))
- {
- null_value=1;
- return 0;
+ res_length=cs->cset->snprintf(cs, (char*)str_value.ptr(), res_length,
+ "%s@%s", user, host);
+ str_value.length(res_length);
+ str_value.mark_as_const();
}
- res_length=cs->cset->snprintf(cs, (char*)str->ptr(), res_length, "%s@%s",
- user, host);
- str->length(res_length);
- str->set_charset(cs);
- return str;
+ return FALSE;
+}
+
+
+bool Item_func_user::fix_fields(THD *thd, Item **ref)
+{
+ return (Item_func_sysconst::fix_fields(thd, ref) ||
+ init(thd->main_security_ctx.user,
+ thd->main_security_ctx.host_or_ip));
+}
+
+
+bool Item_func_current_user::fix_fields(THD *thd, Item **ref)
+{
+ if (Item_func_sysconst::fix_fields(thd, ref))
+ return TRUE;
+
+ Security_context *ctx= (context->security_ctx
+ ? context->security_ctx : thd->security_ctx);
+ return init(ctx->priv_user, ctx->priv_host);
}
@@ -1887,7 +1909,7 @@ void Item_func_elt::fix_length_and_dec()
max_length=0;
decimals=0;
- if (agg_arg_charsets(collation, args+1, arg_count-1, MY_COLL_ALLOW_CONV))
+ if (agg_arg_charsets(collation, args+1, arg_count-1, MY_COLL_ALLOW_CONV, 1))
return;
for (uint i= 1 ; i < arg_count ; i++)
@@ -1954,7 +1976,7 @@ void Item_func_make_set::fix_length_and_dec()
{
max_length=arg_count-1;
- if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV))
+ if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1))
return;
for (uint i=0 ; i < arg_count ; i++)
@@ -2162,14 +2184,9 @@ err:
void Item_func_rpad::fix_length_and_dec()
{
- Item *cargs[2];
-
- cargs[0]= args[0];
- cargs[1]= args[2];
- if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV))
+ // Handle character set for args[0] and args[2].
+ if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 2))
return;
- args[0]= cargs[0];
- args[2]= cargs[1];
if (args[1]->const_item())
{
ulonglong length= ((ulonglong) args[1]->val_int() *
@@ -2249,13 +2266,9 @@ String *Item_func_rpad::val_str(String *str)
void Item_func_lpad::fix_length_and_dec()
{
- Item *cargs[2];
- cargs[0]= args[0];
- cargs[1]= args[2];
- if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV))
+ // Handle character set for args[0] and args[2].
+ if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 2))
return;
- args[0]= cargs[0];
- args[2]= cargs[1];
if (args[1]->const_item())
{
@@ -2712,8 +2725,8 @@ void Item_func_export_set::fix_length_and_dec()
uint sep_length=(arg_count > 3 ? args[3]->max_length : 1);
max_length=length*64+sep_length*63;
- if (agg_arg_charsets(collation, args+1, min(4,arg_count)-1),
- MY_COLL_ALLOW_CONV)
+ if (agg_arg_charsets(collation, args+1, min(4,arg_count)-1,
+ MY_COLL_ALLOW_CONV, 1))
return;
}
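Editor's note: the export_set hunk above fixes a misplaced parenthesis: the
collation flag had ended up outside the function call, so the condition was
"f(...), FLAG" (a comma expression that always evaluates to FLAG) and the real
return value of the call was discarded. A small generic illustration of that
pitfall, using a hypothetical function rather than MySQL code:

#include <cassert>

static bool returns_false(int, int) { return false; }

int main()
{
  const int MY_FLAG= 1;

  // Intended check: pass the flag to the function and test its result.
  bool ok_intended= returns_false(1, MY_FLAG);

  // Mis-parenthesised variant, as in the old export_set code: the flag is
  // a separate expression after the comma operator and decides the result.
  bool ok_broken= (returns_false(1, 2), MY_FLAG);

  assert(ok_intended == false);
  assert(ok_broken == true);      // the function's result was ignored
  return 0;
}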
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index b2e63deed92..e085c0b4d7b 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -105,6 +105,7 @@ public:
class Item_func_reverse :public Item_str_func
{
+ String tmp_value;
public:
Item_func_reverse(Item *a) :Item_str_func(a) {}
String *val_str(String *);
@@ -239,6 +240,8 @@ public:
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "trim"; }
+ void print(String *str);
+ virtual const char *mode_name() const { return "both"; }
bool check_partition_func_processor(byte *bool_arg) { return 0;}
};
@@ -250,6 +253,7 @@ public:
Item_func_ltrim(Item *a) :Item_func_trim(a) {}
String *val_str(String *);
const char *func_name() const { return "ltrim"; }
+ const char *mode_name() const { return "leading"; }
};
@@ -260,6 +264,7 @@ public:
Item_func_rtrim(Item *a) :Item_func_trim(a) {}
String *val_str(String *);
const char *func_name() const { return "rtrim"; }
+ const char *mode_name() const { return "trailing"; }
};
@@ -393,21 +398,40 @@ public:
class Item_func_user :public Item_func_sysconst
{
- bool is_current;
+protected:
+ bool init (const char *user, const char *host);
public:
- Item_func_user(bool is_current_arg)
- :Item_func_sysconst(), is_current(is_current_arg) {}
- String *val_str(String *);
+ Item_func_user()
+ {
+ str_value.set("", 0, system_charset_info);
+ }
+ String *val_str(String *)
+ {
+ DBUG_ASSERT(fixed == 1);
+ return (null_value ? 0 : &str_value);
+ }
+ bool fix_fields(THD *thd, Item **ref);
void fix_length_and_dec()
{
max_length= ((USERNAME_LENGTH + HOSTNAME_LENGTH + 1) *
system_charset_info->mbmaxlen);
}
- const char *func_name() const
- { return is_current ? "current_user" : "user"; }
- const char *fully_qualified_func_name() const
- { return is_current ? "current_user()" : "user()"; }
+ const char *func_name() const { return "user"; }
+ const char *fully_qualified_func_name() const { return "user()"; }
+};
+
+
+class Item_func_current_user :public Item_func_user
+{
+ Name_resolution_context *context;
+
+public:
+ Item_func_current_user(Name_resolution_context *context_arg)
+ : context(context_arg) {}
+ bool fix_fields(THD *thd, Item **ref);
+ const char *func_name() const { return "current_user"; }
+ const char *fully_qualified_func_name() const { return "current_user()"; }
};
@@ -722,7 +746,7 @@ public:
void fix_length_and_dec();
bool eq(const Item *item, bool binary_cmp) const;
const char *func_name() const { return "collate"; }
- enum Functype func_type() const { return COLLATE_FUNC; }
+ enum Functype functype() const { return COLLATE_FUNC; }
void print(String *str);
Item_field *filed_for_view_update()
{
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index a08ac5d5f6a..7dfe65d793a 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -615,14 +615,14 @@ Item_in_subselect::Item_in_subselect(Item * left_exp,
}
Item_allany_subselect::Item_allany_subselect(Item * left_exp,
- Comp_creator *fn,
+ chooser_compare_func_creator fc,
st_select_lex *select_lex,
bool all_arg)
- :Item_in_subselect(), all(all_arg)
+ :Item_in_subselect(), func_creator(fc), all(all_arg)
{
DBUG_ENTER("Item_in_subselect::Item_in_subselect");
left_expr= left_exp;
- func= fn;
+ func= func_creator(all_arg);
init(select_lex, new select_exists_subselect(this));
max_columns= 1;
abort_on_null= 0;
@@ -845,7 +845,8 @@ Item_in_subselect::single_value_transformer(JOIN *join,
if (!select_lex->group_list.elements &&
!select_lex->having &&
!select_lex->with_sum_func &&
- !(select_lex->next_select()))
+ !(select_lex->next_select()) &&
+ select_lex->table_list.elements)
{
Item_sum_hybrid *item;
nesting_map save_allow_sum_func;
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 85bd7a1139d..a72c6e85739 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -270,14 +270,13 @@ public:
/* ALL/ANY/SOME subselect */
class Item_allany_subselect :public Item_in_subselect
{
-protected:
- Comp_creator *func;
-
public:
+ chooser_compare_func_creator func_creator;
+ Comp_creator *func;
bool all;
- Item_allany_subselect(Item * left_expr, Comp_creator *f,
- st_select_lex *select_lex, bool all);
+ Item_allany_subselect(Item * left_expr, chooser_compare_func_creator fc,
+ st_select_lex *select_lex, bool all);
// only ALL subquery has upper not
subs_type substype() { return all?ALL_SUBS:ANY_SUBS; }
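Editor's note: a reduced sketch of the "creator" indirection the
Item_allany_subselect change introduces. Comparator, AllAnyItem and
make_gt_comparator are hypothetical; only the idea comes from the patch: instead
of storing just the comparison functor, the item keeps the factory
(chooser_compare_func_creator) that produced it, so neg_transformer() can
re-create the opposite comparison when NOT is pushed down and ALL becomes ANY
(and vice versa).

#include <cassert>

struct Comparator { bool greater; };                 // stand-in for Comp_creator

typedef Comparator (*comparator_creator)(bool invert);

static Comparator make_gt_comparator(bool invert)
{ return Comparator{ !invert }; }                    // invert flips > into <=

struct AllAnyItem
{
  comparator_creator creator;
  Comparator func;
  bool all;

  AllAnyItem(comparator_creator c, bool all_arg)
    : creator(c), func(c(all_arg)), all(all_arg) {}

  void negate()                                      // the neg_transformer() idea
  {
    all= !all;
    func= creator(all);                              // rebuild the comparison
  }
};

int main()
{
  AllAnyItem item(make_gt_comparator, /*all=*/false); // "x > ANY (...)"
  bool before= item.func.greater;
  item.negate();                                      // becomes "x <= ALL (...)"
  assert(item.func.greater != before);
  return 0;
}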
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index ea0a3c7d154..024b0ecfb42 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -381,7 +381,9 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table,
field= new Field_longlong(max_length, maybe_null, name, unsigned_flag);
break;
case STRING_RESULT:
- if (max_length/collation.collation->mbmaxlen <= 255 || !convert_blob_length)
+ if (max_length/collation.collation->mbmaxlen <= 255 ||
+ max_length/collation.collation->mbmaxlen >=UINT_MAX16 ||
+ !convert_blob_length)
return make_string_field(table);
field= new Field_varstring(convert_blob_length, maybe_null,
name, table->s, collation.collation);
@@ -2662,8 +2664,7 @@ bool Item_sum_count_distinct::add()
return tree->unique_add(table->record[0] + table->s->null_bytes);
}
if ((error= table->file->ha_write_row(table->record[0])) &&
- error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE)
+ table->file->is_fatal_error(error, HA_CHECK_DUP))
return TRUE;
return FALSE;
}
@@ -3249,7 +3250,7 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
args,
/* skip charset aggregation for order columns */
arg_count - arg_count_order,
- MY_COLL_ALLOW_CONV))
+ MY_COLL_ALLOW_CONV, 1))
return 1;
result.set_charset(collation.collation);
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index d38069e54da..2c8d8423d50 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -27,27 +27,9 @@
/* TODO: Move month and days to language files */
+/* Day number for Dec 31st, 9999 */
#define MAX_DAY_NUMBER 3652424L
-static const char *month_names[]=
-{
- "January", "February", "March", "April", "May", "June", "July", "August",
- "September", "October", "November", "December", NullS
-};
-
-TYPELIB month_names_typelib=
-{ array_elements(month_names)-1,"", month_names, NULL };
-
-static const char *day_names[]=
-{
- "Monday", "Tuesday", "Wednesday",
- "Thursday", "Friday", "Saturday" ,"Sunday", NullS
-};
-
-TYPELIB day_names_typelib=
-{ array_elements(day_names)-1,"", day_names, NULL};
-
-
/*
OPTIMIZATION TODO:
- Replace the switch with a function that should be called for each
@@ -222,8 +204,12 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
val= tmp;
break;
case 'M':
+ if ((l_time->month= check_word(my_locale_en_US.month_names,
+ val, val_end, &val)) <= 0)
+ goto err;
+ break;
case 'b':
- if ((l_time->month= check_word(&month_names_typelib,
+ if ((l_time->month= check_word(my_locale_en_US.ab_month_names,
val, val_end, &val)) <= 0)
goto err;
break;
@@ -298,8 +284,11 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
/* Exotic things */
case 'W':
+ if ((weekday= check_word(my_locale_en_US.day_names, val, val_end, &val)) <= 0)
+ goto err;
+ break;
case 'a':
- if ((weekday= check_word(&day_names_typelib, val, val_end, &val)) <= 0)
+ if ((weekday= check_word(my_locale_en_US.ab_day_names, val, val_end, &val)) <= 0)
goto err;
break;
case 'w':
@@ -408,7 +397,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
if (yearday > 0)
{
uint days= calc_daynr(l_time->year,1,1) + yearday - 1;
- if (days <= 0 || days >= MAX_DAY_NUMBER)
+ if (days <= 0 || days > MAX_DAY_NUMBER)
goto err;
get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day);
}
@@ -454,7 +443,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
(weekday - 1);
}
- if (days <= 0 || days >= MAX_DAY_NUMBER)
+ if (days <= 0 || days > MAX_DAY_NUMBER)
goto err;
get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day);
}
@@ -501,9 +490,16 @@ bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time,
uint weekday;
ulong length;
const char *ptr, *end;
+ MY_LOCALE *locale;
+ THD *thd= current_thd;
+ char buf[STRING_BUFFER_USUAL_SIZE];
+ String tmp(buf, sizeof(buf), thd->variables.character_set_results);
+ uint errors= 0;
+ tmp.length(0);
str->length(0);
str->set_charset(&my_charset_bin);
+ locale = thd->variables.lc_time_names;
if (l_time->neg)
str->append('-');
@@ -519,26 +515,38 @@ bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time,
case 'M':
if (!l_time->month)
return 1;
- str->append(month_names[l_time->month-1]);
+ tmp.copy(locale->month_names->type_names[l_time->month-1],
+ strlen(locale->month_names->type_names[l_time->month-1]),
+ system_charset_info, tmp.charset(), &errors);
+ str->append(tmp.ptr(), tmp.length());
break;
case 'b':
if (!l_time->month)
return 1;
- str->append(month_names[l_time->month-1],3);
+ tmp.copy(locale->ab_month_names->type_names[l_time->month-1],
+ strlen(locale->ab_month_names->type_names[l_time->month-1]),
+ system_charset_info, tmp.charset(), &errors);
+ str->append(tmp.ptr(), tmp.length());
break;
case 'W':
if (type == MYSQL_TIMESTAMP_TIME)
return 1;
weekday= calc_weekday(calc_daynr(l_time->year,l_time->month,
l_time->day),0);
- str->append(day_names[weekday]);
+ tmp.copy(locale->day_names->type_names[weekday],
+ strlen(locale->day_names->type_names[weekday]),
+ system_charset_info, tmp.charset(), &errors);
+ str->append(tmp.ptr(), tmp.length());
break;
case 'a':
if (type == MYSQL_TIMESTAMP_TIME)
return 1;
weekday=calc_weekday(calc_daynr(l_time->year,l_time->month,
l_time->day),0);
- str->append(day_names[weekday],3);
+ tmp.copy(locale->ab_day_names->type_names[weekday],
+ strlen(locale->ab_day_names->type_names[weekday]),
+ system_charset_info, tmp.charset(), &errors);
+ str->append(tmp.ptr(), tmp.length());
break;
case 'D':
if (type == MYSQL_TIMESTAMP_TIME)
@@ -871,6 +879,7 @@ String* Item_func_monthname::val_str(String* str)
DBUG_ASSERT(fixed == 1);
const char *month_name;
uint month= (uint) val_int();
+ THD *thd= current_thd;
if (null_value || !month)
{
@@ -878,7 +887,7 @@ String* Item_func_monthname::val_str(String* str)
return (String*) 0;
}
null_value=0;
- month_name= month_names[month-1];
+ month_name= thd->variables.lc_time_names->month_names->type_names[month-1];
str->set(month_name, strlen(month_name), system_charset_info);
return str;
}
@@ -1003,11 +1012,12 @@ String* Item_func_dayname::val_str(String* str)
DBUG_ASSERT(fixed == 1);
uint weekday=(uint) val_int(); // Always Item_func_daynr()
const char *name;
+ THD *thd= current_thd;
if (null_value)
return (String*) 0;
- name= day_names[weekday];
+ name= thd->variables.lc_time_names->day_names->type_names[weekday];
str->set(name, strlen(name), system_charset_info);
return str;
}
@@ -1651,7 +1661,7 @@ uint Item_func_date_format::format_length(const String *format)
switch(*++ptr) {
case 'M': /* month, textual */
case 'W': /* day (of the week), textual */
- size += 9;
+ size += 64; /* large for UTF8 locale data */
break;
case 'D': /* day (of the month), numeric plus english suffix */
case 'Y': /* year, numeric, 4 digits */
@@ -1661,6 +1671,8 @@ uint Item_func_date_format::format_length(const String *format)
break;
case 'a': /* locale's abbreviated weekday name (Sun..Sat) */
case 'b': /* locale's abbreviated month name (Jan..Dec) */
+ size += 32; /* large for UTF8 locale data */
+ break;
case 'j': /* day of year (001..366) */
size += 3;
break;
@@ -1962,7 +1974,6 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date)
if (date_sub_interval)
interval.neg = !interval.neg;
-
if (ltime->year < YY_MAGIC_BELOW)
return (null_value=1);
@@ -2450,7 +2461,7 @@ String *Item_func_makedate::val_str(String *str)
days= calc_daynr(yearnr,1,1) + daynr - 1;
/* Day number from year 0 to 9999-12-31 */
- if (days >= 0 && days < MAX_DAY_NUMBER)
+ if (days >= 0 && days <= MAX_DAY_NUMBER)
{
null_value=0;
get_date_from_daynr(days,&l_time.year,&l_time.month,&l_time.day);
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index fb5ca083eab..ed4b81c897f 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -2396,7 +2396,7 @@ void Item_xml_str_func::fix_length_and_dec()
nodeset_func= 0;
- if (agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV))
+ if (agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1))
return;
if (collation.collation->mbminlen > 1)
diff --git a/sql/lock.cc b/sql/lock.cc
index e5003325df6..8e75ea42f7d 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -935,7 +935,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list)
if (table_list->table)
{
hash_delete(&open_cache, (byte*) table_list->table);
- (void) pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
}
}
@@ -1037,9 +1037,9 @@ end:
(default 0, which will unlock all tables)
NOTES
- One must have a lock on LOCK_open when calling this
- This function will send a COND_refresh signal to inform other threads
- that the name locks are removed
+ One must have a lock on LOCK_open when calling this.
+ This function will broadcast refresh signals to inform other threads
+ that the name locks are removed.
RETURN
0 ok
@@ -1054,7 +1054,7 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list,
table != last_table;
table= table->next_local)
unlock_table_name(thd,table);
- pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
DBUG_VOID_RETURN;
}
@@ -1344,3 +1344,39 @@ bool make_global_read_lock_block_commit(THD *thd)
thd->exit_cond(old_message); // this unlocks LOCK_global_read_lock
DBUG_RETURN(error);
}
+
+
+/*
+ Broadcast COND_refresh and COND_global_read_lock.
+
+ SYNOPSIS
+ broadcast_refresh()
+ void No parameters.
+
+ DESCRIPTION
+ Due to a bug in a threading library it could happen that a signal
+ did not reach its target. A condition for this was that the same
+ condition variable was used with different mutexes in
+ pthread_cond_wait(). Some time ago we changed LOCK_open to
+ LOCK_global_read_lock in global read lock handling. So COND_refresh
+ was used with LOCK_open and LOCK_global_read_lock.
+
+ We have now also changed from COND_refresh to COND_global_read_lock
+ in global read lock handling. Hence it is now necessary to signal
+ both conditions at the same time.
+
+ NOTE
+ When signalling COND_global_read_lock within the global read lock
+ handling, it is not necessary to also signal COND_refresh.
+
+ RETURN
+ void
+*/
+
+void broadcast_refresh(void)
+{
+ VOID(pthread_cond_broadcast(&COND_refresh));
+ VOID(pthread_cond_broadcast(&COND_global_read_lock));
+}
+
+
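
The rationale above boils down to the pthreads rule that a condition variable should only ever be waited on under one and the same mutex. A minimal stand-alone sketch of the resulting broadcast-both pattern (the names below are illustrative only and are not part of this patch):

#include <pthread.h>

/* Illustrative stand-ins for COND_refresh / COND_global_read_lock and the
   mutexes they are paired with; in the server these are existing globals. */
static pthread_mutex_t lock_open=      PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_global_rl= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond_refresh=   PTHREAD_COND_INITIALIZER;
static pthread_cond_t  cond_global_rl= PTHREAD_COND_INITIALIZER;

/* Each condition variable is always waited on under the same mutex
   (a real waiter would re-check its predicate in a loop). */
static void wait_for_refresh_sketch(void)
{
  pthread_mutex_lock(&lock_open);
  pthread_cond_wait(&cond_refresh, &lock_open);
  pthread_mutex_unlock(&lock_open);
}

static void wait_for_global_read_lock_sketch(void)
{
  pthread_mutex_lock(&lock_global_rl);
  pthread_cond_wait(&cond_global_rl, &lock_global_rl);
  pthread_mutex_unlock(&lock_global_rl);
}

/* A refresh-style event wakes both groups of waiters, mirroring the
   broadcast_refresh() helper added above. */
static void broadcast_refresh_sketch(void)
{
  pthread_cond_broadcast(&cond_refresh);
  pthread_cond_broadcast(&cond_global_rl);
}
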
diff --git a/sql/log.cc b/sql/log.cc
index ec73400ea3c..dba4b65efd9 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -42,6 +42,8 @@ LOGGER logger;
MYSQL_BIN_LOG mysql_bin_log;
ulong sync_binlog_counter= 0;
+static Muted_query_log_event invisible_commit;
+
static bool test_if_number(const char *str,
long *res, bool allow_wildcards);
static int binlog_init();
@@ -430,16 +432,23 @@ bool Log_to_csv_event_handler::
table->field[6]->set_notnull();
}
- if (thd->last_insert_id_used)
+ if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt)
{
- table->field[7]->store((longlong) thd->current_insert_id, TRUE);
+ table->field[7]->store((longlong)
+ thd->first_successful_insert_id_in_prev_stmt_for_binlog, TRUE);
table->field[7]->set_notnull();
}
- /* set value if we do an insert on autoincrement column */
- if (thd->insert_id_used)
+ /*
+ Set value if we do an insert on autoincrement column. Note that for
+ some engines (those for which get_auto_increment() does not leave a
+ table lock until the statement ends), this is just the first value and
+ the next ones used may not be contiguous to it.
+ */
+ if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0)
{
- table->field[8]->store((longlong) thd->last_insert_id, TRUE);
+ table->field[8]->store((longlong)
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum(), TRUE);
table->field[8]->set_notnull();
}
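
A rough model of the bookkeeping referenced here, with made-up names (the server keeps the real data in THD::auto_inc_intervals_in_cur_stmt_for_binlog): a statement may claim several auto-increment ranges, and what ends up in the log is the minimum, i.e. the first value of the first range.

#include <cstdint>
#include <utility>
#include <vector>

/* Hypothetical sketch of the per-statement list of auto-increment ranges;
   each entry is (start value, number of values reserved). */
struct Auto_inc_ranges_sketch
{
  std::vector<std::pair<uint64_t, uint64_t> > ranges;

  void append(uint64_t start, uint64_t size)
  { ranges.push_back(std::make_pair(start, size)); }
  size_t nb_elements() const { return ranges.size(); }
  uint64_t minimum() const { return ranges.front().first; } /* first value used */
  void empty() { ranges.clear(); }  /* reset once the statement is binlogged */
};
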
@@ -729,7 +738,6 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
Security_context *sctx= thd->security_ctx;
uint message_buff_len= 0, user_host_len= 0;
longlong query_time= 0, lock_time= 0;
- longlong last_insert_id= 0, insert_id= 0;
/*
Print the message to the buffer if we have slow log enabled
@@ -764,13 +772,6 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
lock_time= (longlong) (thd->time_after_lock - query_start_arg);
}
- if (thd->last_insert_id_used)
- last_insert_id= (longlong) thd->current_insert_id;
-
- /* set value if we do an insert on autoincrement column */
- if (thd->insert_id_used)
- insert_id= (longlong) thd->last_insert_id;
-
if (!query)
{
is_command= TRUE;
@@ -1188,7 +1189,9 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data, Log_event *end_ev)
int error=0;
IO_CACHE *trans_log= &trx_data->trans_log;
- if (end_ev)
+
+ /* NULL denotes ROLLBACK with nothing to replicate */
+ if (end_ev != NULL)
{
/*
We can always end the statement when ending a transaction since
@@ -1259,9 +1262,14 @@ static int binlog_commit(THD *thd, bool all)
// we're here because trans_log was flushed in MYSQL_BIN_LOG::log()
DBUG_RETURN(0);
}
- Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE);
- qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
- DBUG_RETURN(binlog_end_trans(thd, trx_data, &qev));
+ if (all)
+ {
+ Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE);
+ qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
+ DBUG_RETURN(binlog_end_trans(thd, trx_data, &qev));
+ }
+ else
+ DBUG_RETURN(binlog_end_trans(thd, trx_data, &invisible_commit));
}
static int binlog_rollback(THD *thd, bool all)
@@ -1922,18 +1930,22 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
tmp_errno= errno;
strmov(db,thd->db);
}
- if (thd->last_insert_id_used)
+ if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt)
{
end=strmov(end, ",last_insert_id=");
- end=longlong10_to_str((longlong) thd->current_insert_id, end, -10);
+ end=longlong10_to_str((longlong)
+ thd->first_successful_insert_id_in_prev_stmt_for_binlog,
+ end, -10);
}
// Save value if we do an insert.
- if (thd->insert_id_used)
+ if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0)
{
if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
{
end=strmov(end,",insert_id=");
- end=longlong10_to_str((longlong) thd->last_insert_id, end, -10);
+ end=longlong10_to_str((longlong)
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum(),
+ end, -10);
}
}
@@ -3354,21 +3366,24 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
{
if (!thd->current_stmt_binlog_row_based)
{
- if (thd->last_insert_id_used)
+ if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt)
{
Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT,
- thd->current_insert_id);
+ thd->first_successful_insert_id_in_prev_stmt_for_binlog);
if (e.write(file))
goto err;
}
- if (thd->insert_id_used)
+ if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0)
{
+ DBUG_PRINT("info",("number of auto_inc intervals: %lu",
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements()));
/*
If the auto_increment was second in a table's index (possible with
MyISAM or BDB) (table->next_number_key_offset != 0), such event is
in fact not necessary. We could avoid logging it.
*/
- Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id);
+ Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum());
if (e.write(file))
goto err;
}
@@ -3395,6 +3410,9 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
}
}
}
+ /* Forget those values, for next binlogger: */
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty();
}
/*
@@ -3516,6 +3534,9 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
DBUG_ENTER("MYSQL_BIN_LOG::write(THD *, IO_CACHE *, Log_event *)");
VOID(pthread_mutex_lock(&LOCK_log));
+ /* NULL would represent nothing to replicate after ROLLBACK */
+ DBUG_ASSERT(commit_event != NULL);
+
if (likely(is_open())) // Should always be true
{
uint length;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 36805e0043d..ebd90446a7e 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1287,6 +1287,18 @@ bool Query_log_event::write(IO_CACHE* file)
my_b_safe_write(file, (byte*) query, q_len)) ? 1 : 0;
}
+/*
+ Query_log_event::Query_log_event()
+
+ The simplest constructor that could possibly work. This is used for
+ creating static objects that have a special meaning and are invisible
+ to the log.
+*/
+Query_log_event::Query_log_event()
+ :Log_event(), data_buf(0)
+{
+}
+
/*
Query_log_event::Query_log_event()
@@ -1681,14 +1693,17 @@ void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
*/
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+
int Query_log_event::exec_event(struct st_relay_log_info* rli)
{
return exec_event(rli, query, q_len);
}
-int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query_arg, uint32 q_len_arg)
+int Query_log_event::exec_event(struct st_relay_log_info* rli,
+ const char *query_arg, uint32 q_len_arg)
{
+ LEX_STRING new_db;
int expected_error,actual_error= 0;
/*
Colleagues: please never free(thd->catalog) in MySQL. This would lead to
@@ -1697,8 +1712,9 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query
Thank you.
*/
thd->catalog= catalog_len ? (char *) catalog : (char *)"";
- thd->db_length= db_len;
- thd->db= (char *) rpl_filter->get_rewrite_db(db, &thd->db_length);
+ new_db.length= db_len;
+ new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length);
+ thd->set_db(new_db.str, new_db.length); /* allocates a copy of 'db' */
thd->variables.auto_increment_increment= auto_increment_increment;
thd->variables.auto_increment_offset= auto_increment_offset;
@@ -1916,11 +1932,22 @@ end:
don't suffer from these assignments to 0 as DROP TEMPORARY
TABLE uses the db.table syntax.
*/
- thd->db= thd->catalog= 0; // prevent db from being freed
+ thd->catalog= 0;
+ thd->set_db(NULL, 0); /* will free the current database */
thd->query= 0; // just to be sure
- thd->query_length= thd->db_length =0;
+ thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
close_thread_tables(thd);
+ /*
+ As a disk space optimization, future masters will not log an event for
+ LAST_INSERT_ID() if that function returned 0 (and thus they will be able
+ to replace the THD::stmt_depends_on_first_successful_insert_id_in_prev_stmt
+ variable by (THD->first_successful_insert_id_in_prev_stmt > 0)); with the
+ resetting below we are ready to support that.
+ */
+ thd->first_successful_insert_id_in_prev_stmt_for_binlog= 0;
+ thd->first_successful_insert_id_in_prev_stmt= 0;
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
/*
If there was an error we stop. Otherwise we increment positions. Note that
@@ -1936,6 +1963,21 @@ end:
/**************************************************************************
+ Muted_query_log_event methods
+**************************************************************************/
+
+#ifndef MYSQL_CLIENT
+/*
+ Muted_query_log_event::Muted_query_log_event()
+*/
+Muted_query_log_event::Muted_query_log_event()
+ :Query_log_event()
+{
+}
+#endif
+
+
+/**************************************************************************
Start_log_event_v3 methods
**************************************************************************/
@@ -2868,8 +2910,10 @@ void Load_log_event::set_fields(const char* affected_db,
int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
bool use_rli_only_for_errors)
{
- thd->db_length= db_len;
- thd->db= (char *) rpl_filter->get_rewrite_db(db, &thd->db_length);
+ LEX_STRING new_db;
+ new_db.length= db_len;
+ new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length);
+ thd->set_db(new_db.str, new_db.length);
DBUG_ASSERT(thd->query == 0);
thd->query_length= 0; // Should not be needed
thd->query_error= 0;
@@ -2927,7 +2971,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
TABLE_LIST tables;
bzero((char*) &tables,sizeof(tables));
- tables.db = thd->db;
+ tables.db= thd->strmake(thd->db, thd->db_length);
tables.alias = tables.table_name = (char*) table_name;
tables.lock_type = TL_WRITE;
tables.updating= 1;
@@ -3022,7 +3066,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
ex.skip_lines = skip_lines;
List<Item> field_list;
thd->main_lex.select_lex.context.resolve_in_table_list_only(&tables);
- set_fields(thd->db, field_list, &thd->main_lex.select_lex.context);
+ set_fields(tables.db, field_list, &thd->main_lex.select_lex.context);
thd->variables.pseudo_thread_id= thread_id;
List<Item> set_fields;
if (net)
@@ -3069,11 +3113,12 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
error:
thd->net.vio = 0;
- char *save_db= thd->db;
+ const char *remember_db= thd->db;
VOID(pthread_mutex_lock(&LOCK_thread_count));
- thd->db= thd->catalog= 0;
+ thd->catalog= 0;
+ thd->set_db(NULL, 0); /* will free the current database */
thd->query= 0;
- thd->query_length= thd->db_length= 0;
+ thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
close_thread_tables(thd);
if (thd->query_error)
@@ -3090,7 +3135,7 @@ error:
}
slave_print_msg(ERROR_LEVEL, rli, sql_errno,"\
Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- err, (char*)table_name, print_slave_db_safe(save_db));
+ err, (char*)table_name, print_slave_db_safe(remember_db));
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
return 1;
}
@@ -3100,7 +3145,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
{
slave_print_msg(ERROR_LEVEL, rli, ER_UNKNOWN_ERROR, "\
Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- (char*)table_name, print_slave_db_safe(save_db));
+ (char*)table_name, print_slave_db_safe(remember_db));
return 1;
}
@@ -3175,8 +3220,7 @@ Rotate_log_event::Rotate_log_event(const char* new_log_ident_arg,
llstr(pos_arg, buff), flags));
#endif
if (flags & DUP_NAME)
- new_log_ident= my_strndup((const byte*) new_log_ident_arg,
- ident_len, MYF(MY_WME));
+ new_log_ident= my_strndup(new_log_ident_arg, ident_len, MYF(MY_WME));
DBUG_VOID_RETURN;
}
#endif
@@ -3199,9 +3243,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len,
(header_size+post_header_len));
ident_offset = post_header_len;
set_if_smaller(ident_len,FN_REFLEN-1);
- new_log_ident= my_strndup((byte*) buf + ident_offset,
- (uint) ident_len,
- MYF(MY_WME));
+ new_log_ident= my_strndup(buf + ident_offset, (uint) ident_len, MYF(MY_WME));
DBUG_VOID_RETURN;
}
@@ -3399,11 +3441,11 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli)
{
switch (type) {
case LAST_INSERT_ID_EVENT:
- thd->last_insert_id_used = 1;
- thd->last_insert_id = val;
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1;
+ thd->first_successful_insert_id_in_prev_stmt= val;
break;
case INSERT_ID_EVENT:
- thd->next_insert_id = val;
+ thd->force_one_auto_inc_interval(val);
break;
}
rli->inc_event_relay_log_pos();
@@ -5327,10 +5369,10 @@ int Rows_log_event::exec_event(st_relay_log_info *rli)
/*
lock_tables() reads the contents of thd->lex, so they must be
- initialized, so we should call lex_start(); to be even safer, we
- call mysql_init_query() which does a more complete set of inits.
+ initialized. Contrary to in Table_map_log_event::exec_event() we don't
+ call mysql_init_query() as that may reset the binlog format.
*/
- mysql_init_query(thd, NULL, 0);
+ lex_start(thd, NULL, 0);
while ((error= lock_tables(thd, rli->tables_to_lock,
rli->tables_to_lock_count, &need_reopen)))
@@ -5813,7 +5855,7 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli)
if (memory == NULL)
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- uint32 dummy_len;
+ uint dummy_len;
bzero(table_list, sizeof(*table_list));
table_list->db = db_mem;
table_list->alias= table_list->table_name = tname_mem;
@@ -5834,6 +5876,12 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli)
else
{
/*
+ open_tables() reads the contents of thd->lex, so they must be
+ initialized, so we should call lex_start(); to be even safer, we
+ call mysql_init_query() which does a more complete set of inits.
+ */
+ mysql_init_query(thd, NULL, 0);
+ /*
Check if the slave is set to use SBR. If so, it should switch
to using RBR until the end of the "statement", i.e., next
STMT_END_F or next error.
@@ -5849,12 +5897,6 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli)
Note that for any table that should not be replicated, a filter is needed.
*/
uint count;
- /*
- open_tables() reads the contents of thd->lex, so they must be
- initialized, so we should call lex_start(); to be even safer, we
- call mysql_init_query() which does a more complete set of inits.
- */
- mysql_init_query(thd, NULL, 0);
if ((error= open_tables(thd, &table_list, &count, 0)))
{
if (thd->query_error || thd->is_fatal_error)
@@ -6112,6 +6154,7 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table)
thd->lex->sql_command= SQLCOM_REPLACE;
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); // Needed for ndbcluster
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); // Needed for ndbcluster
table->file->extra(HA_EXTRA_IGNORE_NO_KEY); // Needed for ndbcluster
/*
TODO: the cluster team (Tomas?) says that it's better if the engine knows
diff --git a/sql/log_event.h b/sql/log_event.h
index 63588ed9b33..313b5174da9 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -808,6 +808,7 @@ public:
void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
+ Query_log_event();
Query_log_event(const char* buf, uint event_len,
const Format_description_log_event *description_event,
Log_event_type event_type);
@@ -831,6 +832,26 @@ public:
/* Writes derived event-specific part of post header. */
};
+
+/*****************************************************************************
+
+ Muted Query Log Event class
+
+ Pretends to log SQL queries, but doesn't actually do so.
+
+ ****************************************************************************/
+class Muted_query_log_event: public Query_log_event
+{
+public:
+#ifndef MYSQL_CLIENT
+ Muted_query_log_event();
+
+ bool write(IO_CACHE* file) { return(false); };
+ virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; }
+#endif
+};
+
+
#ifdef HAVE_REPLICATION
/*****************************************************************************
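
A minimal stand-alone sketch of the idea behind the muted event (simplified classes, not the real log_event.h hierarchy): because its write() reports success without emitting any bytes, the normal binlog_end_trans() path can run for a statement-level commit while nothing is appended to the binary log.

#include <cstdio>

struct Log_sketch { /* stands in for IO_CACHE */ };

struct Query_event_sketch
{
  virtual bool write(Log_sketch*) { std::puts("COMMIT event written"); return false; }
  virtual ~Query_event_sketch() {}
};

struct Muted_query_event_sketch : public Query_event_sketch
{
  virtual bool write(Log_sketch*) { return false; }  /* pretend success, emit nothing */
};

/* binlog_end_trans()-style caller; false means "written OK" in this sketch. */
static bool end_trans_sketch(Query_event_sketch *end_ev, Log_sketch *log)
{
  return end_ev->write(log);
}

In the patch itself, binlog_commit() hands the static invisible_commit object to binlog_end_trans() on the statement-level path (all == FALSE) and a real COMMIT Query_log_event only when the whole transaction ends.
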
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index c0b453b7d69..0b3a76a71cf 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -101,6 +101,23 @@ char* query_table_status(THD *thd,const char *db,const char *table_name);
extern CHARSET_INFO *system_charset_info, *files_charset_info ;
extern CHARSET_INFO *national_charset_info, *table_alias_charset;
+
+typedef struct my_locale_st
+{
+ const char *name;
+ const char *description;
+ const bool is_ascii;
+ TYPELIB *month_names;
+ TYPELIB *ab_month_names;
+ TYPELIB *day_names;
+ TYPELIB *ab_day_names;
+} MY_LOCALE;
+
+extern MY_LOCALE my_locale_en_US;
+extern MY_LOCALE *my_locales[];
+
+MY_LOCALE *my_locale_by_name(const char *name);
+
/***************************************************************************
Configuration parameters
****************************************************************************/
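
As a rough illustration of how a record like MY_LOCALE is meant to be filled in and looked up, here is a self-contained sketch; the names and data are invented for the example, and the real locale tables are defined elsewhere in the server.

#include <cstring>

/* Hypothetical simplified TYPELIB and locale record. */
struct Typelib_sketch { unsigned count; const char **type_names; };

static const char *en_ab_day[]= {"Sun","Mon","Tue","Wed","Thu","Fri","Sat", 0};
static Typelib_sketch en_ab_day_names= {7, en_ab_day};

struct Locale_sketch
{
  const char     *name;          /* e.g. "en_US" */
  Typelib_sketch *ab_day_names;  /* month_names etc. would follow the same pattern */
};

static Locale_sketch locales_sketch[]= {{"en_US", &en_ab_day_names}};

/* A my_locale_by_name()-style lookup over the table above. */
static Locale_sketch *locale_by_name_sketch(const char *name)
{
  for (unsigned i= 0; i < sizeof(locales_sketch)/sizeof(locales_sketch[0]); i++)
    if (!strcmp(locales_sketch[i].name, name))
      return &locales_sketch[i];
  return 0;
}
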
@@ -521,11 +538,13 @@ enum enum_var_type
OPT_DEFAULT= 0, OPT_SESSION, OPT_GLOBAL
};
class sys_var;
-#include "item.h"
-extern my_decimal decimal_zero;
#ifdef MYSQL_SERVER
+class Comp_creator;
typedef Comp_creator* (*chooser_compare_func_creator)(bool invert);
#endif
+#include "item.h"
+extern my_decimal decimal_zero;
+
/* sql_parse.cc */
void free_items(Item *item);
void cleanup_items(Item *item);
@@ -555,6 +574,7 @@ int append_query_string(CHARSET_INFO *csinfo,
void get_default_definer(THD *thd, LEX_USER *definer);
LEX_USER *create_default_definer(THD *thd);
LEX_USER *create_definer(THD *thd, LEX_STRING *user_name, LEX_STRING *host_name);
+LEX_USER *get_current_user(THD *thd, LEX_USER *user);
enum enum_mysql_completiontype {
ROLLBACK_RELEASE=-2, ROLLBACK=1, ROLLBACK_AND_CHAIN=7,
@@ -587,6 +607,7 @@ struct Query_cache_query_flags
ulong sql_mode;
ulong max_sort_length;
ulong group_concat_max_len;
+ MY_LOCALE *lc_time_names;
};
#define QUERY_CACHE_FLAGS_SIZE sizeof(Query_cache_query_flags)
#include "sql_cache.h"
@@ -872,9 +893,7 @@ bool mysql_alter_table(THD *thd, char *new_db, char *new_name,
TABLE_LIST *table_list,
List<create_field> &fields,
List<Key> &keys,
- uint order_num, ORDER *order,
- enum enum_duplicates handle_duplicates,
- bool ignore,
+ uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok);
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok);
bool mysql_create_like_table(THD *thd, TABLE_LIST *table,
@@ -1696,6 +1715,7 @@ void start_waiting_global_read_lock(THD *thd);
bool make_global_read_lock_block_commit(THD *thd);
bool set_protect_against_global_read_lock(void);
void unset_protect_against_global_read_lock(void);
+void broadcast_refresh(void);
/* Lock based on name */
int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list);
@@ -1971,6 +1991,17 @@ inline int hexchar_to_int(char c)
}
/*
+ is_user_table()
+ return true if the table was created explicitly
+*/
+
+inline bool is_user_table(TABLE * table)
+{
+ const char *name= table->s->table_name.str;
+ return strncmp(name, tmp_file_prefix, tmp_file_prefix_length);
+}
+
+/*
Some functions that are different in the embedded library and the normal
server
*/
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 6e57993a61a..93ed663ae06 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -323,7 +323,9 @@ static char *opt_init_slave, *language_ptr, *opt_init_connect;
static char *default_character_set_name;
static char *character_set_filesystem_name;
static char *my_bind_addr_str;
-static char *default_collation_name, *default_storage_engine_str;
+static char *default_collation_name;
+static char *default_storage_engine_str;
+static char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME;
static char mysql_data_home_buff[2];
static struct passwd *user_info;
static I_List<THD> thread_cache;
@@ -7047,7 +7049,7 @@ static void mysql_init_variables(void)
/* Variables in libraries */
charsets_dir= 0;
default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME;
- default_collation_name= (char*) MYSQL_DEFAULT_COLLATION_NAME;
+ default_collation_name= compiled_default_collation_name;
sys_charset_system.value= (char*) system_charset_info->csname;
character_set_filesystem_name= (char*) "binary";
@@ -7181,7 +7183,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
strmake(mysql_home,argument,sizeof(mysql_home)-1);
break;
case 'C':
- default_collation_name= 0;
+ if (default_collation_name == compiled_default_collation_name)
+ default_collation_name= 0;
break;
case 'l':
opt_log=1;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index ea7654dc8aa..a5a46ba11b6 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -85,18 +85,119 @@ static int sel_cmp(Field *f,char *a,char *b,uint8 a_flag,uint8 b_flag);
static char is_null_string[2]= {1,0};
+
+/*
+ A construction block of the SEL_ARG-graph.
+
+ The following description only covers graphs of SEL_ARG objects with
+ sel_arg->type==KEY_RANGE:
+
+ One SEL_ARG object represents an "elementary interval" in form
+
+ min_value <=? table.keypartX <=? max_value
+
+ The interval is a non-empty interval of any kind: with[out] minimum/maximum
+ bound, [half]open/closed, single-point interval, etc.
+
+ 1. SEL_ARG GRAPH STRUCTURE
+
+ SEL_ARG objects are linked together in a graph. The meaning of the graph
+ is better demonstrated by an example:
+
+ tree->keys[i]
+ |
+ | $ $
+ | part=1 $ part=2 $ part=3
+ | $ $
+ | +-------+ $ +-------+ $ +--------+
+ | | kp1<1 |--$-->| kp2=5 |--$-->| kp3=10 |
+ | +-------+ $ +-------+ $ +--------+
+ | | $ $ |
+ | | $ $ +--------+
+ | | $ $ | kp3=12 |
+ | | $ $ +--------+
+ | +-------+ $ $
+ \->| kp1=2 |--$--------------$-+
+ +-------+ $ $ | +--------+
+ | $ $ ==>| kp3=11 |
+ +-------+ $ $ | +--------+
+ | kp1=3 |--$--------------$-+ |
+ +-------+ $ $ +--------+
+ | $ $ | kp3=14 |
+ ... $ $ +--------+
+
+ The entire graph is partitioned into "interval lists".
+
+ An interval list is a sequence of ordered disjoint intervals over the same
+ key part. SEL_ARG objects are linked via "next" and "prev" pointers. Additionally,
+ all intervals in the list form an RB-tree, linked via left/right/parent
+ pointers. The RB-tree root SEL_ARG object will be further called "root of the
+ interval list".
+
+ In the example pic, there are 4 interval lists:
+ "kp1<1 OR kp1=2 OR kp1=3", "kp2=5", "kp3=10 OR kp3=12", "kp3=11 OR kp3=14".
+ The vertical lines represent SEL_ARG::next/prev pointers.
+
+ In an interval list, each member X may have SEL_ARG::next_key_part pointer
+ pointing to the root of another interval list Y. The pointed-to interval
+ list must cover a key part with a greater number (i.e. Y->part > X->part).
+
+ In the example pic, the next_key_part pointers are represented by
+ horizontal lines.
+
+ 2. SEL_ARG GRAPH SEMANTICS
+
+ It represents a condition in a special form (we don't have a name for it ATM).
+ The SEL_ARG::next/prev is "OR", and next_key_part is "AND".
+
+ For example, the picture represents the condition in form:
+ (kp1 < 1 AND kp2=5 AND (kp3=10 OR kp3=12)) OR
+ (kp1=2 AND (kp3=11 OR kp3=14)) OR
+ (kp1=3 AND (kp3=11 OR kp3=14))
+
+
+ 3. SEL_ARG GRAPH USE
+
+ Use get_mm_tree() to construct SEL_ARG graph from WHERE condition.
+ Then walk the SEL_ARG graph and get a list of disjoint ordered key
+ intervals (i.e. intervals in the form
+
+ (constA_1, .., constA_K) < (keypart_1, .., keypart_K) < (constB_1, .., constB_K))
+
+ Those intervals can be used to access the index. The uses are in:
+ - check_quick_select() - Walk the SEL_ARG graph and find an estimate of
+ how many table records are contained within all
+ intervals.
+ - get_quick_select() - Walk the SEL_ARG, materialize the key intervals,
+ and create QUICK_RANGE_SELECT object that will
+ read records within these intervals.
+*/
+
class SEL_ARG :public Sql_alloc
{
public:
uint8 min_flag,max_flag,maybe_flag;
uint8 part; // Which key part
uint8 maybe_null;
- uint16 elements; // Elements in tree
- ulong use_count; // use of this sub_tree
+ /*
+ Number of children of this element in the RB-tree, plus 1 for this
+ element itself.
+ */
+ uint16 elements;
+ /*
+ Valid only for elements which are RB-tree roots: Number of times this
+ RB-tree is referred to (it is referred to by SEL_ARG::next_key_part or by
+ SEL_TREE::keys[i] or by a temporary SEL_ARG* variable)
+ */
+ ulong use_count;
+
Field *field;
char *min_value,*max_value; // Pointer to range
- SEL_ARG *left,*right,*next,*prev,*parent,*next_key_part;
+ SEL_ARG *left,*right; /* R-B tree children */
+ SEL_ARG *next,*prev; /* Links for bi-directional interval list */
+ SEL_ARG *parent; /* R-B tree parent */
+ SEL_ARG *next_key_part;
enum leaf_color { BLACK,RED } color;
enum Type { IMPOSSIBLE, MAYBE, MAYBE_KEY, KEY_RANGE } type;
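
A tiny stand-alone sketch of the linkage described in the comment above (this is not the real SEL_ARG class): following next walks the ORed intervals of one key part, while next_key_part descends into the ANDed interval list of the following key part.

#include <cstdio>

struct Interval_sketch
{
  const char      *text;           /* e.g. "kp1=2" */
  Interval_sketch *next;           /* OR: next interval on the same key part */
  Interval_sketch *next_key_part;  /* AND: interval list of the next key part */
};

/* Print the condition encoded by one interval list in the form
   (I1 AND (...)) OR (I2 AND (...)) OR ... */
static void print_condition_sketch(const Interval_sketch *list)
{
  for (const Interval_sketch *i= list; i; i= i->next)
  {
    std::printf("(%s", i->text);
    if (i->next_key_part)
    {
      std::printf(" AND ");
      print_condition_sketch(i->next_key_part);
    }
    std::printf(")%s", i->next ? " OR " : "");
  }
}
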
@@ -1430,6 +1531,7 @@ SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg)
}
increment_use_count(1);
tmp->color= color;
+ tmp->elements= this->elements;
return tmp;
}
@@ -3140,7 +3242,12 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
ppar->is_part_keypart[part]= !in_subpart_fields;
ppar->is_subpart_keypart[part]= in_subpart_fields;
-
+
+ /*
+ Check if this was the last field in this array, in which case we
+ switch to subpartitioning fields. (This will only happen if
+ there are subpartitioning fields to cater for.)
+ */
if (!*(++field))
{
field= part_info->subpart_field_array;
@@ -5837,8 +5944,21 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag)
}
+/*
+ Produce a SEL_ARG graph that represents "key1 AND key2"
+
+ SYNOPSIS
+ key_and()
+ key1 First argument, root of its RB-tree
+ key2 Second argument, root of its RB-tree
+
+ RETURN
+ RB-tree root of the resulting SEL_ARG graph.
+ NULL if the result of AND operation is an empty interval {0}.
+*/
+
static SEL_ARG *
-key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag)
+key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag)
{
if (!key1)
return key2;
@@ -5901,6 +6021,7 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag)
if ((key1->min_flag | key2->min_flag) & GEOM_FLAG)
{
+ /* TODO: why not leave one of the trees? */
key1->free_tree();
key2->free_tree();
return 0; // Can't optimize this
@@ -6623,6 +6744,51 @@ int test_rb_tree(SEL_ARG *element,SEL_ARG *parent)
return -1; // Error, no more warnings
}
+
+/*
+ Count how many times SEL_ARG graph "root" refers to its part "key"
+
+ SYNOPSIS
+ count_key_part_usage()
+ root An RB-Root node in a SEL_ARG graph.
+ key Another RB-Root node in that SEL_ARG graph.
+
+ DESCRIPTION
+ The passed "root" node may refer to "key" node via root->next_key_part,
+ root->next->n
+
+ This function counts how many times the node "key" is referred to (via
+ SEL_ARG::next_key_part) by
+ - intervals of RB-tree pointed by "root",
+ - intervals of RB-trees that are pointed by SEL_ARG::next_key_part from
+ intervals of RB-tree pointed by "root",
+ - and so on.
+
+ Here is an example (horizontal links represent next_key_part pointers,
+ vertical links - next/prev pointers):
+
+ +----+ $
+ |root|-----------------+
+ +----+ $ |
+ | $ |
+ | $ |
+ +----+ +---+ $ | +---+ Here the return value
+ | |- ... -| |---$-+--+->|key| will be 4.
+ +----+ +---+ $ | | +---+
+ | $ | |
+ ... $ | |
+ | $ | |
+ +----+ +---+ $ | |
+ | |---| |---------+ |
+ +----+ +---+ $ |
+ | | $ |
+ ... +---+ $ |
+ | |------------+
+ +---+ $
+ RETURN
+ Number of links to "key" from nodes reachable from "root".
+*/
+
static ulong count_key_part_usage(SEL_ARG *root, SEL_ARG *key)
{
ulong count= 0;
@@ -6640,6 +6806,20 @@ static ulong count_key_part_usage(SEL_ARG *root, SEL_ARG *key)
}
+/*
+ Check if SEL_ARG::use_count value is correct
+
+ SYNOPSIS
+ SEL_ARG::test_use_count()
+ root The root node of the SEL_ARG graph (an RB-tree root node that
+ has the least value of sel_arg->part in the entire graph, and
+ thus is the "origin" of the graph)
+
+ DESCRIPTION
+ Check if SEL_ARG::use_count value is correct. See the definition of
+ use_count for what is "correct".
+*/
+
void SEL_ARG::test_use_count(SEL_ARG *root)
{
uint e_count=0;
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 85cedf663cd..6f26f9f782c 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -670,7 +670,7 @@ private:
#ifdef NOT_USED
bool test_if_null_range(QUICK_RANGE *range, uint used_key_parts);
#endif
- int reset(void) { next=0; rev_it.rewind(); return 0; }
+ int reset(void) { rev_it.rewind(); return QUICK_RANGE_SELECT::reset(); }
List<QUICK_RANGE> rev_ranges;
List_iterator<QUICK_RANGE> rev_it;
};
diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc
index 143cd027b5f..c01b5189887 100644
--- a/sql/rpl_filter.cc
+++ b/sql/rpl_filter.cc
@@ -513,7 +513,7 @@ Rpl_filter::get_wild_ignore_table(String* str)
const char*
-Rpl_filter::get_rewrite_db(const char* db, uint32 *new_len)
+Rpl_filter::get_rewrite_db(const char* db, uint *new_len)
{
if (rewrite_db.is_empty() || !db)
return db;
diff --git a/sql/rpl_filter.h b/sql/rpl_filter.h
index 58d2b97c9c6..718fd401c56 100644
--- a/sql/rpl_filter.h
+++ b/sql/rpl_filter.h
@@ -70,7 +70,7 @@ public:
void get_wild_do_table(String* str);
void get_wild_ignore_table(String* str);
- const char* get_rewrite_db(const char* db, uint32 *new_len);
+ const char* get_rewrite_db(const char* db, uint *new_len);
I_List<i_string>* get_do_db();
I_List<i_string>* get_ignore_db();
diff --git a/sql/set_var.cc b/sql/set_var.cc
index b0ecc7eccef..a141e7eba39 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -109,7 +109,6 @@ extern ulong ndb_report_thresh_binlog_mem_usage;
-
static HASH system_variable_hash;
const char *bool_type_names[]= { "OFF", "ON", NullS };
TYPELIB bool_typelib=
@@ -631,6 +630,9 @@ static sys_var_thd_ha_rows sys_select_limit("sql_select_limit",
static sys_var_timestamp sys_timestamp("timestamp");
static sys_var_last_insert_id sys_last_insert_id("last_insert_id");
static sys_var_last_insert_id sys_identity("identity");
+
+static sys_var_thd_lc_time_names sys_lc_time_names("lc_time_names");
+
static sys_var_insert_id sys_insert_id("insert_id");
static sys_var_readonly sys_error_count("error_count",
OPT_SESSION,
@@ -871,6 +873,7 @@ SHOW_VAR init_vars[]= {
{"large_files_support", (char*) &opt_large_files, SHOW_BOOL},
{"large_page_size", (char*) &opt_large_page_size, SHOW_INT},
{"large_pages", (char*) &opt_large_pages, SHOW_MY_BOOL},
+ {sys_lc_time_names.name, (char*) &sys_lc_time_names, SHOW_SYS},
{sys_license.name, (char*) &sys_license, SHOW_SYS},
{sys_local_infile.name, (char*) &sys_local_infile, SHOW_SYS},
#ifdef HAVE_MLOCKALL
@@ -1097,7 +1100,7 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex,
uint new_length= (var ? var->value->str_value.length() : 0);
if (!old_value)
old_value= (char*) "";
- if (!(res= my_strndup((byte*)old_value, new_length, MYF(0))))
+ if (!(res= my_strndup(old_value, new_length, MYF(0))))
return 1;
/*
Replace the old value in such a way that the any thread using
@@ -1343,9 +1346,9 @@ bool sys_var_thd_binlog_format::is_readonly() const
return 1;
}
/*
- if in a stored function, it's too late to change mode
+ if in a stored function/trigger, it's too late to change mode
*/
- if (thd->spcont && thd->prelocked_mode)
+ if (thd->in_sub_stmt)
{
my_error(ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT, MYF(0));
return 1;
@@ -2632,7 +2635,7 @@ bool update_sys_var_str_path(THD *thd, sys_var_str *var_str,
old_value= make_default_log_name(buff, log_ext);
str_length= strlen(old_value);
}
- if (!(res= my_strndup((byte*)old_value, str_length, MYF(MY_FAE+MY_WME))))
+ if (!(res= my_strndup(old_value, str_length, MYF(MY_FAE+MY_WME))))
{
result= 1;
goto err;
@@ -2794,7 +2797,8 @@ byte *sys_var_timestamp::value_ptr(THD *thd, enum_var_type type,
bool sys_var_last_insert_id::update(THD *thd, set_var *var)
{
- thd->insert_id(var->save_result.ulonglong_value);
+ thd->first_successful_insert_id_in_prev_stmt=
+ var->save_result.ulonglong_value;
return 0;
}
@@ -2802,14 +2806,19 @@ bool sys_var_last_insert_id::update(THD *thd, set_var *var)
byte *sys_var_last_insert_id::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- thd->sys_var_tmp.long_value= (long) thd->insert_id();
- return (byte*) &thd->last_insert_id;
+ /*
+ This tmp var makes it robust against changes of the type of
+ read_first_successful_insert_id_in_prev_stmt().
+ */
+ thd->sys_var_tmp.ulonglong_value=
+ thd->read_first_successful_insert_id_in_prev_stmt();
+ return (byte*) &thd->sys_var_tmp.ulonglong_value;
}
bool sys_var_insert_id::update(THD *thd, set_var *var)
{
- thd->next_insert_id= var->save_result.ulonglong_value;
+ thd->force_one_auto_inc_interval(var->save_result.ulonglong_value);
return 0;
}
@@ -2817,7 +2826,9 @@ bool sys_var_insert_id::update(THD *thd, set_var *var)
byte *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- return (byte*) &thd->current_insert_id;
+ thd->sys_var_tmp.ulonglong_value=
+ thd->auto_inc_intervals_forced.minimum();
+ return (byte*) &thd->sys_var_tmp.ulonglong_value;
}
@@ -3003,6 +3014,40 @@ byte *sys_var_max_user_conn::value_ptr(THD *thd, enum_var_type type,
return (byte*) &(max_user_connections);
}
+bool sys_var_thd_lc_time_names::check(THD *thd, set_var *var)
+{
+ char *locale_str= var->value->str_value.c_ptr();
+ MY_LOCALE *locale_match= my_locale_by_name(locale_str);
+
+ if (locale_match == NULL)
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Unknown locale: '%s'", MYF(0), locale_str);
+ return 1;
+ }
+ var->save_result.locale_value= locale_match;
+ return 0;
+}
+
+
+bool sys_var_thd_lc_time_names::update(THD *thd, set_var *var)
+{
+ thd->variables.lc_time_names= var->save_result.locale_value;
+ return 0;
+}
+
+
+byte *sys_var_thd_lc_time_names::value_ptr(THD *thd, enum_var_type type,
+ LEX_STRING *base)
+{
+ return (byte *)(thd->variables.lc_time_names->name);
+}
+
+
+void sys_var_thd_lc_time_names::set_default(THD *thd, enum_var_type type)
+{
+ thd->variables.lc_time_names = &my_locale_en_US;
+}
/*
Functions to update thd->options bits
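
The new variable follows the check()/update() split visible above: check() resolves and validates the locale name once via my_locale_by_name() and caches the pointer in var->save_result, while update() merely installs it. A minimal stand-alone model of that two-phase pattern (stub names, not server API):

#include <cstring>

struct Locale_stub { const char *name; };
static Locale_stub known_locales[]= {{"en_US"}, {"ja_JP"}, {"de_DE"}};

/* my_locale_by_name()-style resolution over a fixed table. */
static Locale_stub *resolve_locale_stub(const char *name)
{
  for (unsigned i= 0; i < sizeof(known_locales)/sizeof(known_locales[0]); i++)
    if (!strcmp(known_locales[i].name, name))
      return &known_locales[i];
  return 0;
}

struct Session_stub { Locale_stub *lc_time_names; };

/* "check" rejects unknown names; "update" installs the resolved pointer. */
static bool set_lc_time_names_stub(Session_stub *s, const char *value)
{
  Locale_stub *match= resolve_locale_stub(value);
  if (!match)
    return true;              /* error, variable left unchanged */
  s->lc_time_names= match;
  return false;
}
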
diff --git a/sql/set_var.h b/sql/set_var.h
index d01ce833d14..a63bcc4a55d 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -28,6 +28,8 @@
class sys_var;
class set_var;
typedef struct system_variables SV;
+typedef struct my_locale_st MY_LOCALE;
+
extern TYPELIB bool_typelib, delay_key_write_typelib, sql_mode_typelib;
typedef int (*sys_check_func)(THD *, set_var *);
@@ -903,6 +905,25 @@ public:
};
+class sys_var_thd_lc_time_names :public sys_var_thd
+{
+public:
+ sys_var_thd_lc_time_names(const char *name_arg):
+ sys_var_thd(name_arg)
+ {}
+ bool check(THD *thd, set_var *var);
+ SHOW_TYPE type() { return SHOW_CHAR; }
+ bool check_update_type(Item_result type)
+ {
+ return type != STRING_RESULT; /* Only accept strings */
+ }
+ bool check_default(enum_var_type type) { return 0; }
+ bool update(THD *thd, set_var *var);
+ byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
+ virtual void set_default(THD *thd, enum_var_type type);
+};
+
+
class sys_var_event_scheduler :public sys_var_long_ptr
{
/* We need a derived class only to have a warn_deprecated() */
@@ -964,6 +985,7 @@ public:
handlerton *hton;
DATE_TIME_FORMAT *date_time_format;
Time_zone *time_zone;
+ MY_LOCALE *locale_value;
} save_result;
LEX_STRING base; /* for structs */
@@ -1055,7 +1077,7 @@ public:
uint name_length_arg, gptr data_arg)
:name_length(name_length_arg), data(data_arg)
{
- name= my_strndup((byte*) name_arg, name_length, MYF(MY_WME));
+ name= my_strndup(name_arg, name_length, MYF(MY_WME));
links->push_back(this);
}
inline bool cmp(const char *name_cmp, uint length)
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 8d2822370f2..a5fefc38c59 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -5802,6 +5802,8 @@ ER_FOREIGN_DUPLICATE_KEY 23000 S1009
eng "Upholding foreign key constraints for table '%.64s', entry '%-.64s', key %d would lead to a duplicate entry"
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use scripts/mysql_fix_privilege_tables"
+ER_REMOVED_SPACES
+ eng "Leading spaces are removed from name '%s'"
ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
eng "Cannot switch out of the row-based binary log format when the session has open temporary tables"
ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT
@@ -5839,3 +5841,5 @@ ER_CANT_ACTIVATE_LOG
eng "Cannot activate '%-.64s' log."
ER_RBR_NOT_AVAILABLE
eng "The server was not built with row-based replication"
+ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
+ eng "Triggers can not be created on system tables"
diff --git a/sql/slave.cc b/sql/slave.cc
index d9895323b92..19060eba2d4 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -63,14 +63,14 @@ static int count_relay_log_space(RELAY_LOG_INFO* rli);
static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type);
static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi);
static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
- bool suppress_warnings);
+ bool suppress_warnings);
static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
- bool reconnect, bool suppress_warnings);
+ bool reconnect, bool suppress_warnings);
static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed,
- void* thread_killed_arg);
+ void* thread_killed_arg);
static int request_table_dump(MYSQL* mysql, const char* db, const char* table);
static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
- const char* table_name, bool overwrite);
+ const char* table_name, bool overwrite);
static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi);
/*
@@ -78,17 +78,17 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi);
SYNOPSIS
init_thread_mask()
- mask Return value here
- mi master_info for slave
- inverse If set, returns which threads are not running
+ mask Return value here
+ mi master_info for slave
+ inverse If set, returns which threads are not running
IMPLEMENTATION
Get a bit mask for which threads are running so that we can later restart
these threads.
RETURN
- mask If inverse == 0, running threads
- If inverse == 1, stopped threads
+ mask If inverse == 0, running threads
+ If inverse == 1, stopped threads
*/
void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse)
@@ -168,7 +168,7 @@ int init_slave()
}
if (init_master_info(active_mi,master_info_file,relay_log_info_file,
- !master_host, (SLAVE_IO | SLAVE_SQL)))
+ !master_host, (SLAVE_IO | SLAVE_SQL)))
{
sql_print_error("Failed to initialize the master info structure");
goto err;
@@ -182,11 +182,11 @@ int init_slave()
if (master_host && !opt_skip_slave_start)
{
if (start_slave_threads(1 /* need mutex */,
- 0 /* no wait for start*/,
- active_mi,
- master_info_file,
- relay_log_info_file,
- SLAVE_IO | SLAVE_SQL))
+ 0 /* no wait for start*/,
+ active_mi,
+ master_info_file,
+ relay_log_info_file,
+ SLAVE_IO | SLAVE_SQL))
{
sql_print_error("Failed to create slave threads");
goto err;
@@ -206,12 +206,12 @@ err:
SYNOPSIS
init_relay_log_pos()
- rli Relay information (will be initialized)
- log Name of relay log file to read from. NULL = First log
- pos Position in relay log file
- need_data_lock Set to 1 if this functions should do mutex locks
- errmsg Store pointer to error message here
- look_for_description_event
+ rli Relay information (will be initialized)
+ log Name of relay log file to read from. NULL = First log
+ pos Position in relay log file
+ need_data_lock Set to 1 if this functions should do mutex locks
+ errmsg Store pointer to error message here
+ look_for_description_event
1 if we should look for such an event. We only need
this when the SQL thread starts and opens an existing
relay log and has to execute it (possibly from an
@@ -229,13 +229,13 @@ err:
- check proper initialization of group_master_log_name/group_master_log_pos
RETURN VALUES
- 0 ok
- 1 error. errmsg is set to point to the error message
+ 0 ok
+ 1 error. errmsg is set to point to the error message
*/
int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
- ulonglong pos, bool need_data_lock,
- const char** errmsg,
+ ulonglong pos, bool need_data_lock,
+ const char** errmsg,
bool look_for_description_event)
{
DBUG_ENTER("init_relay_log_pos");
@@ -243,7 +243,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
*errmsg=0;
pthread_mutex_t *log_lock=rli->relay_log.get_log_lock();
-
+
if (need_data_lock)
pthread_mutex_lock(&rli->data_lock);
@@ -260,13 +260,13 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
By default the relay log is in binlog format 3 (4.0).
Even if format is 4, this will work enough to read the first event
(Format_desc) (remember that format 4 is just lengthened compared to format
- 3; format 3 is a prefix of format 4).
+ 3; format 3 is a prefix of format 4).
*/
rli->relay_log.description_event_for_exec= new
Format_description_log_event(3);
-
+
pthread_mutex_lock(log_lock);
-
+
/* Close log file and free buffers if it's already open */
if (rli->cur_log_fd >= 0)
{
@@ -274,7 +274,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
my_close(rli->cur_log_fd, MYF(MY_WME));
rli->cur_log_fd = -1;
}
-
+
rli->group_relay_log_pos = rli->event_relay_log_pos = pos;
/*
@@ -293,9 +293,9 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
goto err;
}
strmake(rli->group_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->group_relay_log_name)-1);
+ sizeof(rli->group_relay_log_name)-1);
strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->event_relay_log_name)-1);
+ sizeof(rli->event_relay_log_name)-1);
if (rli->relay_log.is_active(rli->linfo.log_file_name))
{
/*
@@ -314,7 +314,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
Open the relay log and set rli->cur_log to point at this one
*/
if ((rli->cur_log_fd=open_binlog(&rli->cache_buf,
- rli->linfo.log_file_name,errmsg)) < 0)
+ rli->linfo.log_file_name,errmsg)) < 0)
goto err;
rli->cur_log = &rli->cache_buf;
}
@@ -325,7 +325,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
if (pos > BIN_LOG_HEADER_SIZE) /* If pos<=4, we stay at 4 */
{
Log_event* ev;
- while (look_for_description_event)
+ while (look_for_description_event)
{
/*
Read the possible Format_description_log_event; if position
@@ -378,7 +378,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
or Format_desc.
*/
}
- else
+ else
{
DBUG_PRINT("info",("found event of another type=%d",
ev->get_type_code()));
@@ -391,7 +391,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
{
char llbuf1[22], llbuf2[22];
DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s",
- llstr(my_b_tell(rli->cur_log),llbuf1),
+ llstr(my_b_tell(rli->cur_log),llbuf1),
llstr(rli->event_relay_log_pos,llbuf2)));
}
#endif
@@ -406,7 +406,7 @@ err:
if (!relay_log_purge)
rli->log_space_limit= 0;
pthread_cond_broadcast(&rli->data_cond);
-
+
pthread_mutex_unlock(log_lock);
if (need_data_lock)
@@ -423,7 +423,7 @@ err:
SYNOPSIS
init_slave_skip_errors()
- arg List of errors numbers to skip, separated with ','
+ arg List of errors numbers to skip, separated with ','
NOTES
Called from get_options() in mysqld.cc on start-up
@@ -462,7 +462,7 @@ void init_slave_skip_errors(const char* arg)
void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
- bool skip_lock)
+ bool skip_lock)
{
DBUG_ENTER("st_relay_log_info::inc_group_relay_log_pos");
@@ -471,10 +471,10 @@ void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
inc_event_relay_log_pos();
group_relay_log_pos= event_relay_log_pos;
strmake(group_relay_log_name,event_relay_log_name,
- sizeof(group_relay_log_name)-1);
+ sizeof(group_relay_log_name)-1);
notify_group_relay_log_name_update();
-
+
/*
If the slave does not support transactions and replicates a transaction,
users should not trust group_master_log_pos (which they can display with
@@ -506,7 +506,7 @@ void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
With the end_log_pos solution, we avoid computations involving lengthes.
*/
DBUG_PRINT("info", ("log_pos: %lu group_master_log_pos: %lu",
- (long) log_pos, (long) group_master_log_pos));
+ (long) log_pos, (long) group_master_log_pos));
if (log_pos) // 3.23 binlogs don't have log_posx
{
group_master_log_pos= log_pos;
@@ -546,7 +546,7 @@ void st_relay_log_info::close_temporary_tables()
*/
int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset,
- const char** errmsg)
+ const char** errmsg)
{
int error=0;
DBUG_ENTER("purge_relay_logs");
@@ -584,10 +584,10 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset,
rli->slave_skip_counter=0;
pthread_mutex_lock(&rli->data_lock);
- /*
- we close the relay log fd possibly left open by the slave SQL thread,
+ /*
+ we close the relay log fd possibly left open by the slave SQL thread,
to be able to delete it; the relay log fd possibly left open by the slave
- I/O thread will be closed naturally in reset_logs() by the
+ I/O thread will be closed naturally in reset_logs() by the
close(LOG_CLOSE_TO_BE_OPENED) call
*/
if (rli->cur_log_fd >= 0)
@@ -605,9 +605,9 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset,
}
/* Save name of used relay log file */
strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(rli->group_relay_log_name)-1);
+ sizeof(rli->group_relay_log_name)-1);
strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(rli->event_relay_log_name)-1);
+ sizeof(rli->event_relay_log_name)-1);
rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
if (count_relay_log_space(rli))
{
@@ -617,12 +617,12 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset,
if (!just_reset)
error= init_relay_log_pos(rli, rli->group_relay_log_name,
rli->group_relay_log_pos,
- 0 /* do not need data lock */, errmsg, 0);
-
+ 0 /* do not need data lock */, errmsg, 0);
+
err:
#ifndef DBUG_OFF
char buf[22];
-#endif
+#endif
DBUG_PRINT("info",("log_space_total: %s",llstr(rli->log_space_total,buf)));
pthread_mutex_unlock(&rli->data_lock);
DBUG_RETURN(error);
@@ -641,7 +641,7 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock)
sql_cond_lock=sql_lock;
io_cond_lock=io_lock;
-
+
if (skip_lock)
{
sql_lock = io_lock = 0;
@@ -651,10 +651,10 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock)
DBUG_PRINT("info",("Terminating IO thread"));
mi->abort_slave=1;
if ((error=terminate_slave_thread(mi->io_thd,io_lock,
- io_cond_lock,
- &mi->stop_cond,
- &mi->slave_running)) &&
- !force_all)
+ io_cond_lock,
+ &mi->stop_cond,
+ &mi->slave_running)) &&
+ !force_all)
DBUG_RETURN(error);
}
if ((thread_mask & (SLAVE_SQL|SLAVE_FORCE_ALL)) && mi->rli.slave_running)
@@ -663,10 +663,10 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock)
DBUG_ASSERT(mi->rli.sql_thd != 0) ;
mi->rli.abort_slave=1;
if ((error=terminate_slave_thread(mi->rli.sql_thd,sql_lock,
- sql_cond_lock,
- &mi->rli.stop_cond,
- &mi->rli.slave_running)) &&
- !force_all)
+ sql_cond_lock,
+ &mi->rli.stop_cond,
+ &mi->rli.slave_running)) &&
+ !force_all)
DBUG_RETURN(error);
}
DBUG_RETURN(0);
@@ -674,9 +674,9 @@ int terminate_slave_threads(MASTER_INFO* mi,int thread_mask,bool skip_lock)
int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock,
- pthread_mutex_t *cond_lock,
- pthread_cond_t* term_cond,
- volatile uint *slave_running)
+ pthread_mutex_t *cond_lock,
+ pthread_cond_t* term_cond,
+ volatile uint *slave_running)
{
DBUG_ENTER("terminate_slave_thread");
if (term_lock)
@@ -695,7 +695,7 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock,
be referening freed memory trying to kick it
*/
- while (*slave_running) // Should always be true
+ while (*slave_running) // Should always be true
{
DBUG_PRINT("loop", ("killing slave thread"));
KICK_SLAVE(thd);
@@ -714,11 +714,11 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock,
int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
- pthread_mutex_t *cond_lock,
- pthread_cond_t *start_cond,
- volatile uint *slave_running,
- volatile ulong *slave_run_id,
- MASTER_INFO* mi,
+ pthread_mutex_t *cond_lock,
+ pthread_cond_t *start_cond,
+ volatile uint *slave_running,
+ volatile ulong *slave_run_id,
+ MASTER_INFO* mi,
bool high_priority)
{
pthread_t th;
@@ -738,7 +738,7 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
sql_print_error("Server id not set, will not start slave");
DBUG_RETURN(ER_BAD_SLAVE);
}
-
+
if (*slave_running)
{
if (start_cond)
@@ -764,12 +764,12 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
{
DBUG_PRINT("sleep",("Waiting for slave thread to start"));
const char* old_msg = thd->enter_cond(start_cond,cond_lock,
- "Waiting for slave thread to start");
+ "Waiting for slave thread to start");
pthread_cond_wait(start_cond,cond_lock);
thd->exit_cond(old_msg);
pthread_mutex_lock(cond_lock); // re-acquire it as exit_cond() released
if (thd->killed)
- DBUG_RETURN(thd->killed_errno());
+ DBUG_RETURN(thd->killed_errno());
}
}
if (start_lock)
@@ -788,14 +788,14 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock,
*/
int start_slave_threads(bool need_slave_mutex, bool wait_for_start,
- MASTER_INFO* mi, const char* master_info_fname,
- const char* slave_info_fname, int thread_mask)
+ MASTER_INFO* mi, const char* master_info_fname,
+ const char* slave_info_fname, int thread_mask)
{
pthread_mutex_t *lock_io=0,*lock_sql=0,*lock_cond_io=0,*lock_cond_sql=0;
pthread_cond_t* cond_io=0,*cond_sql=0;
int error=0;
DBUG_ENTER("start_slave_threads");
-
+
if (need_slave_mutex)
{
lock_io = &mi->run_lock;
@@ -811,15 +811,15 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start,
if (thread_mask & SLAVE_IO)
error=start_slave_thread(handle_slave_io,lock_io,lock_cond_io,
- cond_io,
- &mi->slave_running, &mi->slave_run_id,
- mi, 1); //high priority, to read the most possible
+ cond_io,
+ &mi->slave_running, &mi->slave_run_id,
+ mi, 1); //high priority, to read the most possible
if (!error && (thread_mask & SLAVE_SQL))
{
error=start_slave_thread(handle_slave_sql,lock_sql,lock_cond_sql,
- cond_sql,
- &mi->rli.slave_running, &mi->rli.slave_run_id,
- mi, 0);
+ cond_sql,
+ &mi->rli.slave_running, &mi->rli.slave_run_id,
+ mi, 0);
if (error)
terminate_slave_threads(mi, thread_mask & SLAVE_IO, 0);
}
@@ -997,8 +997,8 @@ void skip_load_data_infile(NET *net)
DBUG_ENTER("skip_load_data_infile");
(void)net_request_file(net, "/dev/null");
- (void)my_net_read(net); // discard response
- (void)net_write_command(net, 0, "", 0, "", 0); // Send ok
+ (void)my_net_read(net); // discard response
+ (void)net_write_command(net, 0, "", 0, "", 0); // Send ok
DBUG_VOID_RETURN;
}
@@ -1024,7 +1024,7 @@ const char *print_slave_db_safe(const char* db)
}
static int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
- const char *default_val)
+ const char *default_val)
{
uint length;
DBUG_ENTER("init_strvar_from_file");
@@ -1037,8 +1037,8 @@ static int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
else
{
/*
- If we truncated a line or stopped on last char, remove all chars
- up to and including newline.
+ If we truncated a line or stopped on last char, remove all chars
+ up to and including newline.
*/
int c;
while (((c=my_b_get(f)) != '\n' && c != my_b_EOF));
@@ -1059,8 +1059,8 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val)
char buf[32];
DBUG_ENTER("init_intvar_from_file");
-
- if (my_b_gets(f, buf, sizeof(buf)))
+
+ if (my_b_gets(f, buf, sizeof(buf)))
{
*var = atoi(buf);
DBUG_RETURN(0);
@@ -1081,7 +1081,7 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val)
when people upgrade a 3.23 master to 4.0 without doing RESET MASTER: 4.0
slaves are fooled. So we do this only to distinguish between 3.23 and more
recent masters (it's too late to change things for 3.23).
-
+
RETURNS
0 ok
1 error
@@ -1098,7 +1098,7 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
*/
delete mi->rli.relay_log.description_event_for_queue;
mi->rli.relay_log.description_event_for_queue= 0;
-
+
if (!my_isdigit(&my_charset_bin,*mysql->server_version))
errmsg = "Master reported unrecognized MySQL version";
else
@@ -1106,7 +1106,7 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
/*
Note the following switch will bug when we have MySQL branch 30 ;)
*/
- switch (*mysql->server_version)
+ switch (*mysql->server_version)
{
case '0':
case '1':
@@ -1115,13 +1115,13 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
break;
case '3':
mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(1, mysql->server_version);
+ Format_description_log_event(1, mysql->server_version);
break;
case '4':
mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(3, mysql->server_version);
+ Format_description_log_event(3, mysql->server_version);
break;
- default:
+ default:
/*
Master is MySQL >=5.0. Give a default Format_desc event, so that we can
take the early steps (like tests for "is this a 3.23 master") which we
@@ -1131,18 +1131,18 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
master is 3.23, 4.0, etc.
*/
mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(4, mysql->server_version);
+ Format_description_log_event(4, mysql->server_version);
break;
}
}
-
- /*
+
+ /*
This does not mean that a 5.0 slave will be able to read a 6.0 master; but
as we don't know yet, we don't want to forbid this for now. If a 5.0 slave
can't read a 6.0 master, this will show up when the slave can't read some
events sent by the master, and there will be error messages.
*/
-
+
if (errmsg)
{
sql_print_error(errmsg);
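The switch in the hunks above keys the relay log's description event off the first digit of the master's version string. A compact sketch of that mapping follows; the helper name binlog_version_for_master is made up for illustration, while the real code constructs Format_description_log_event objects directly.

/* Returns the binlog format version implied by the master's version
   string, or -1 for an unrecognized master (sketch only). */
static int binlog_version_for_master(const char *server_version)
{
  switch (*server_version) {
  case '0':
  case '1':
  case '2': return -1;      /* "Master reported unrecognized MySQL version" */
  case '3': return 1;       /* 3.23 master                                  */
  case '4': return 3;       /* 4.0 / 4.1 master                             */
  default:  return 4;       /* assume 5.0+; refined once real events arrive */
  }
}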
@@ -1162,12 +1162,12 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
*/
MYSQL_RES *master_res= 0;
MYSQL_ROW master_row;
-
+
if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT UNIX_TIMESTAMP()")) &&
(master_res= mysql_store_result(mysql)) &&
(master_row= mysql_fetch_row(master_res)))
{
- mi->clock_diff_with_master=
+ mi->clock_diff_with_master=
(long) (time((time_t*) 0) - strtoul(master_row[0], 0, 10));
}
else
@@ -1177,8 +1177,8 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS");
}
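The clock difference captured just above is what later turns a master timestamp into the Seconds_Behind_Master column mentioned in the warning. A hedged sketch of the arithmetic; seconds_behind_master here is illustrative only, the actual column is assembled elsewhere from the relay log info.

#include <time.h>

/* clock_diff = slave wall clock - master wall clock, sampled once at
   connect time via SELECT UNIX_TIMESTAMP() (as in the hunk above). */
static long compute_clock_diff(unsigned long master_unix_timestamp)
{
  return (long) (time((time_t*) 0) - (time_t) master_unix_timestamp);
}

/* Lag is roughly: now - timestamp of the last executed master event
   - clock_diff, clamped at zero so clock skew cannot make it negative. */
static long seconds_behind_master(time_t last_master_timestamp, long clock_diff)
{
  long lag= (long) (time((time_t*) 0) - last_master_timestamp) - clock_diff;
  return lag > 0 ? lag : 0;
}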
if (master_res)
- mysql_free_result(master_res);
-
+ mysql_free_result(master_res);
+
/*
Check that the master's server id and ours are different. Because if they
are equal (which can result from a simple copy of master's datadir to slave,
@@ -1245,9 +1245,9 @@ be equal for replication to work";
time and so could differ for slave and master even if they are really
 in the same system time zone. So we are omitting this check and just
relying on documentation. Also according to Monty there are many users
- who are using replication between servers in various time zones. Hence
- such check will broke everything for them. (And now everything will
- work for them because by default both their master and slave will have
+ who are using replication between servers in various time zones. Hence
+ such a check would break everything for them. (And now everything will
+ work for them because by default both their master and slave will have
'SYSTEM' time zone).
This check is only necessary for 4.x masters (and < 5.0.4 masters but
those were alpha).
@@ -1257,7 +1257,7 @@ be equal for replication to work";
(master_res= mysql_store_result(mysql)))
{
if ((master_row= mysql_fetch_row(master_res)) &&
- strcmp(master_row[0],
+ strcmp(master_row[0],
global_system_variables.time_zone->get_name()->ptr()))
errmsg= "The slave I/O thread stops because master and slave have \
different values for the TIME_ZONE global variable. The values must \
@@ -1287,7 +1287,7 @@ err:
*/
static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
- const char* table_name, bool overwrite)
+ const char* table_name, bool overwrite)
{
ulong packet_len;
char *query, *save_db;
@@ -1309,10 +1309,10 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
}
if (net->read_pos[0] == 255) // error from master
{
- char *err_msg;
+ char *err_msg;
err_msg= (char*) net->read_pos + ((mysql->server_capabilities &
- CLIENT_PROTOCOL_41) ?
- 3+SQLSTATE_LENGTH+1 : 3);
+ CLIENT_PROTOCOL_41) ?
+ 3+SQLSTATE_LENGTH+1 : 3);
my_error(ER_MASTER, MYF(0), err_msg);
DBUG_RETURN(1);
}
@@ -1351,12 +1351,12 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
DBUG_ASSERT(thd->db != 0);
thd->db_length= strlen(thd->db);
mysql_parse(thd, thd->query, packet_len); // run create table
- thd->db = save_db; // leave things the way the were before
+ thd->db = save_db; // leave things the way they were before
thd->db_length= save_db_length;
thd->options = save_options;
-
+
if (thd->query_error)
- goto err; // mysql_parse took care of the error send
+ goto err; // mysql_parse took care of the error send
thd->proc_info = "Opening master dump table";
tables.lock_type = TL_WRITE;
@@ -1365,7 +1365,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
sql_print_error("create_table_from_dump: could not open created table");
goto err;
}
-
+
file = tables.table->file;
thd->proc_info = "Reading master dump table data";
/* Copy the data file */
@@ -1396,22 +1396,22 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
err:
close_thread_tables(thd);
thd->net.no_send_ok = 0;
- DBUG_RETURN(error);
+ DBUG_RETURN(error);
}
int fetch_master_table(THD *thd, const char *db_name, const char *table_name,
- MASTER_INFO *mi, MYSQL *mysql, bool overwrite)
+ MASTER_INFO *mi, MYSQL *mysql, bool overwrite)
{
int error= 1;
const char *errmsg=0;
bool called_connected= (mysql != NULL);
DBUG_ENTER("fetch_master_table");
DBUG_PRINT("enter", ("db_name: '%s' table_name: '%s'",
- db_name,table_name));
+ db_name,table_name));
if (!called_connected)
- {
+ {
if (!(mysql = mysql_init(NULL)))
{
DBUG_RETURN(1);
@@ -1442,7 +1442,7 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name,
goto err;
}
if (create_table_from_dump(thd, mysql, db_name,
- table_name, overwrite))
+ table_name, overwrite))
goto err; // create_table_from_dump has sent the error already
error = 0;
@@ -1452,7 +1452,7 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name,
mysql_close(mysql);
if (errmsg && thd->vio_ok())
my_message(error, errmsg, MYF(0));
- DBUG_RETURN(test(error)); // Return 1 on error
+ DBUG_RETURN(test(error)); // Return 1 on error
}
@@ -1568,44 +1568,44 @@ file '%s', errno %d)", fname, my_errno);
goto err;
}
if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0,
- MYF(MY_WME)))
+ MYF(MY_WME)))
{
sql_print_error("Failed to create a cache on relay log info file '%s'",
- fname);
+ fname);
msg= current_thd->net.last_error;
goto err;
}
/* Init relay log with first entry in the relay index file */
if (init_relay_log_pos(rli,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */,
- &msg, 0))
+ &msg, 0))
{
sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)");
goto err;
}
rli->group_master_log_name[0]= 0;
- rli->group_master_log_pos= 0;
+ rli->group_master_log_pos= 0;
rli->info_fd= info_fd;
}
else // file exists
{
if (info_fd >= 0)
reinit_io_cache(&rli->info_file, READ_CACHE, 0L,0,0);
- else
+ else
{
int error=0;
if ((info_fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0)
{
sql_print_error("\
Failed to open the existing relay log info file '%s' (errno %d)",
- fname, my_errno);
+ fname, my_errno);
error= 1;
}
else if (init_io_cache(&rli->info_file, info_fd,
IO_SIZE*2, READ_CACHE, 0L, 0, MYF(MY_WME)))
{
sql_print_error("Failed to create a cache on relay log info file '%s'",
- fname);
+ fname);
error= 1;
}
if (error)
@@ -1618,16 +1618,16 @@ Failed to open the existing relay log info file '%s' (errno %d)",
DBUG_RETURN(1);
}
}
-
+
rli->info_fd = info_fd;
int relay_log_pos, master_log_pos;
if (init_strvar_from_file(rli->group_relay_log_name,
- sizeof(rli->group_relay_log_name),
+ sizeof(rli->group_relay_log_name),
&rli->info_file, "") ||
init_intvar_from_file(&relay_log_pos,
- &rli->info_file, BIN_LOG_HEADER_SIZE) ||
+ &rli->info_file, BIN_LOG_HEADER_SIZE) ||
init_strvar_from_file(rli->group_master_log_name,
- sizeof(rli->group_master_log_name),
+ sizeof(rli->group_master_log_name),
&rli->info_file, "") ||
init_intvar_from_file(&master_log_pos, &rli->info_file, 0))
{
@@ -1640,15 +1640,15 @@ Failed to open the existing relay log info file '%s' (errno %d)",
rli->group_master_log_pos= master_log_pos;
if (init_relay_log_pos(rli,
- rli->group_relay_log_name,
- rli->group_relay_log_pos,
- 0 /* no data lock*/,
- &msg, 0))
+ rli->group_relay_log_name,
+ rli->group_relay_log_pos,
+ 0 /* no data lock*/,
+ &msg, 0))
{
char llbuf[22];
sql_print_error("Failed to open the relay log '%s' (relay_log_pos %s)",
- rli->group_relay_log_name,
- llstr(rli->group_relay_log_pos, llbuf));
+ rli->group_relay_log_name,
+ llstr(rli->group_relay_log_pos, llbuf));
goto err;
}
}
@@ -1657,7 +1657,7 @@ Failed to open the existing relay log info file '%s' (errno %d)",
{
char llbuf1[22], llbuf2[22];
DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s",
- llstr(my_b_tell(rli->cur_log),llbuf1),
+ llstr(my_b_tell(rli->cur_log),llbuf1),
llstr(rli->event_relay_log_pos,llbuf2)));
DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE);
DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos);
@@ -1699,14 +1699,14 @@ static inline int add_relay_log(RELAY_LOG_INFO* rli,LOG_INFO* linfo)
if (!my_stat(linfo->log_file_name,&s,MYF(0)))
{
sql_print_error("log %s listed in the index, but failed to stat",
- linfo->log_file_name);
+ linfo->log_file_name);
DBUG_RETURN(1);
}
rli->log_space_total += s.st_size;
#ifndef DBUG_OFF
char buf[22];
DBUG_PRINT("info",("log_space_total: %s", llstr(rli->log_space_total,buf)));
-#endif
+#endif
DBUG_RETURN(0);
}
@@ -1721,11 +1721,11 @@ static bool wait_for_relay_log_space(RELAY_LOG_INFO* rli)
pthread_mutex_lock(&rli->log_space_lock);
save_proc_info= thd->enter_cond(&rli->log_space_cond,
- &rli->log_space_lock,
- "\
+ &rli->log_space_lock,
+ "\
Waiting for the slave SQL thread to free enough relay log space");
while (rli->log_space_limit < rli->log_space_total &&
- !(slave_killed=io_slave_killed(thd,mi)) &&
+ !(slave_killed=io_slave_killed(thd,mi)) &&
!rli->ignore_log_space_limit)
pthread_cond_wait(&rli->log_space_cond, &rli->log_space_lock);
thd->exit_cond(save_proc_info);
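wait_for_relay_log_space() above follows the usual enter_cond / pthread_cond_wait / exit_cond shape. A generic sketch of that pattern with plain pthreads; the predicate and names below are placeholders, not the server's API, and the THD bookkeeping is deliberately omitted.

#include <pthread.h>

/* Wait until *done becomes true. The mutex protects the predicate; the
   condition variable is signalled by whoever changes it -- the same
   shape as waiting on rli->log_space_cond under rli->log_space_lock. */
static void wait_until_done(pthread_mutex_t *lock, pthread_cond_t *cond,
                            const volatile int *done)
{
  pthread_mutex_lock(lock);
  while (!*done)                      /* re-check: wakeups may be spurious */
    pthread_cond_wait(cond, lock);
  pthread_mutex_unlock(lock);
}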
@@ -1748,9 +1748,9 @@ static int count_relay_log_space(RELAY_LOG_INFO* rli)
if (add_relay_log(rli,&linfo))
DBUG_RETURN(1);
} while (!rli->relay_log.find_next_log(&linfo, 1));
- /*
+ /*
As we have counted everything, including what may have been written in a
- preceding write, we must reset bytes_written, or we may count some space
+ preceding write, we must reset bytes_written, or we may count some space
twice.
*/
rli->relay_log.reset_bytes_written();
@@ -1816,8 +1816,8 @@ void init_master_info_with_options(MASTER_INFO* mi)
DBUG_ENTER("init_master_info_with_options");
mi->master_log_name[0] = 0;
- mi->master_log_pos = BIN_LOG_HEADER_SIZE; // skip magic number
-
+ mi->master_log_pos = BIN_LOG_HEADER_SIZE; // skip magic number
+
if (master_host)
strmake(mi->host, master_host, sizeof(mi->host) - 1);
if (master_user)
@@ -1826,7 +1826,7 @@ void init_master_info_with_options(MASTER_INFO* mi)
strmake(mi->password, master_password, MAX_PASSWORD_LENGTH);
mi->port = master_port;
mi->connect_retry = master_connect_retry;
-
+
mi->ssl= master_ssl;
if (master_ssl_ca)
strmake(mi->ssl_ca, master_ssl_ca, sizeof(mi->ssl_ca)-1);
@@ -1935,7 +1935,7 @@ file '%s', errno %d)", fname, my_errno);
goto err;
}
if (init_io_cache(&mi->file, fd, IO_SIZE*2, READ_CACHE, 0L,0,
- MYF(MY_WME)))
+ MYF(MY_WME)))
{
sql_print_error("Failed to create a cache on master info file (\
file '%s')", fname);
@@ -1995,8 +1995,8 @@ file '%s')", fname);
overwritten by the second row later.
*/
if (init_strvar_from_file(mi->master_log_name,
- sizeof(mi->master_log_name), &mi->file,
- ""))
+ sizeof(mi->master_log_name), &mi->file,
+ ""))
goto errwithmsg;
lines= strtoul(mi->master_log_name, &first_non_digit, 10);
@@ -2012,15 +2012,15 @@ file '%s')", fname);
lines= 7;
if (init_intvar_from_file(&master_log_pos, &mi->file, 4) ||
- init_strvar_from_file(mi->host, sizeof(mi->host), &mi->file,
- master_host) ||
- init_strvar_from_file(mi->user, sizeof(mi->user), &mi->file,
- master_user) ||
+ init_strvar_from_file(mi->host, sizeof(mi->host), &mi->file,
+ master_host) ||
+ init_strvar_from_file(mi->user, sizeof(mi->user), &mi->file,
+ master_user) ||
init_strvar_from_file(mi->password, SCRAMBLED_PASSWORD_CHAR_LENGTH+1,
&mi->file, master_password) ||
- init_intvar_from_file(&port, &mi->file, master_port) ||
- init_intvar_from_file(&connect_retry, &mi->file,
- master_connect_retry))
+ init_intvar_from_file(&port, &mi->file, master_port) ||
+ init_intvar_from_file(&connect_retry, &mi->file,
+ master_connect_retry))
goto errwithmsg;
/*
@@ -2059,8 +2059,8 @@ file '%s')", fname);
mi->ssl= (my_bool) ssl;
}
DBUG_PRINT("master_info",("log_file_name: %s position: %ld",
- mi->master_log_name,
- (ulong) mi->master_log_pos));
+ mi->master_log_name,
+ (ulong) mi->master_log_pos));
mi->rli.mi = mi;
if (init_relay_log_info(&mi->rli, slave_info_fname))
@@ -2105,23 +2105,23 @@ int register_slave_on_master(MYSQL* mysql)
/* 30 is a good safety margin */
if (report_host_len + report_user_len + report_password_len + 30 >
sizeof(buf))
- DBUG_RETURN(0); // safety
+ DBUG_RETURN(0); // safety
int4store(pos, server_id); pos+= 4;
- pos= net_store_data(pos, report_host, report_host_len);
+ pos= net_store_data(pos, report_host, report_host_len);
pos= net_store_data(pos, report_user, report_user_len);
pos= net_store_data(pos, report_password, report_password_len);
int2store(pos, (uint16) report_port); pos+= 2;
- int4store(pos, rpl_recovery_rank); pos+= 4;
+ int4store(pos, rpl_recovery_rank); pos+= 4;
/* The master will fill in master_id */
- int4store(pos, 0); pos+= 4;
+ int4store(pos, 0); pos+= 4;
if (simple_command(mysql, COM_REGISTER_SLAVE, (char*) buf,
- (uint) (pos- buf), 0))
+ (uint) (pos- buf), 0))
{
sql_print_error("Error on COM_REGISTER_SLAVE: %d '%s'",
- mysql_errno(mysql),
- mysql_error(mysql));
+ mysql_errno(mysql),
+ mysql_error(mysql));
DBUG_RETURN(1);
}
DBUG_RETURN(0);
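For reference, the register-slave packet built above has a fixed little-endian layout. Below is a hedged sketch of an equivalent builder; store4/store2/store_str stand in for int4store/int2store/net_store_data, and the one-byte length prefix assumes strings shorter than 251 bytes, so treat the details as assumptions rather than a protocol reference.

#include <string.h>
#include <stdint.h>

static unsigned char *store4(unsigned char *p, uint32_t v)
{ p[0]= v; p[1]= v >> 8; p[2]= v >> 16; p[3]= v >> 24; return p + 4; }

static unsigned char *store2(unsigned char *p, uint16_t v)
{ p[0]= v; p[1]= v >> 8; return p + 2; }

/* length-prefixed string (assumes len < 251, one length byte) */
static unsigned char *store_str(unsigned char *p, const char *s, size_t len)
{ *p++= (unsigned char) len; memcpy(p, s, len); return p + len; }

/* COM_REGISTER_SLAVE payload: server_id, host, user, password, port,
   recovery_rank, master_id (0 -- the master fills it in). */
static size_t build_register_slave(unsigned char *buf, uint32_t server_id,
                                   const char *host, const char *user,
                                   const char *password, uint16_t port,
                                   uint32_t recovery_rank)
{
  unsigned char *pos= buf;
  pos= store4(pos, server_id);
  pos= store_str(pos, host, strlen(host));
  pos= store_str(pos, user, strlen(user));
  pos= store_str(pos, password, strlen(password));
  pos= store2(pos, port);
  pos= store4(pos, recovery_rank);
  pos= store4(pos, 0);
  return (size_t) (pos - buf);
}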
@@ -2136,25 +2136,25 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
DBUG_ENTER("show_master_info");
field_list.push_back(new Item_empty_string("Slave_IO_State",
- 14));
+ 14));
field_list.push_back(new Item_empty_string("Master_Host",
- sizeof(mi->host)));
+ sizeof(mi->host)));
field_list.push_back(new Item_empty_string("Master_User",
- sizeof(mi->user)));
+ sizeof(mi->user)));
field_list.push_back(new Item_return_int("Master_Port", 7,
- MYSQL_TYPE_LONG));
+ MYSQL_TYPE_LONG));
field_list.push_back(new Item_return_int("Connect_Retry", 10,
- MYSQL_TYPE_LONG));
+ MYSQL_TYPE_LONG));
field_list.push_back(new Item_empty_string("Master_Log_File",
- FN_REFLEN));
+ FN_REFLEN));
field_list.push_back(new Item_return_int("Read_Master_Log_Pos", 10,
- MYSQL_TYPE_LONGLONG));
+ MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("Relay_Log_File",
- FN_REFLEN));
+ FN_REFLEN));
field_list.push_back(new Item_return_int("Relay_Log_Pos", 10,
- MYSQL_TYPE_LONGLONG));
+ MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("Relay_Master_Log_File",
- FN_REFLEN));
+ FN_REFLEN));
field_list.push_back(new Item_empty_string("Slave_IO_Running", 3));
field_list.push_back(new Item_empty_string("Slave_SQL_Running", 3));
field_list.push_back(new Item_empty_string("Replicate_Do_DB", 20));
@@ -2163,33 +2163,33 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
field_list.push_back(new Item_empty_string("Replicate_Ignore_Table", 23));
field_list.push_back(new Item_empty_string("Replicate_Wild_Do_Table", 24));
field_list.push_back(new Item_empty_string("Replicate_Wild_Ignore_Table",
- 28));
+ 28));
field_list.push_back(new Item_return_int("Last_Errno", 4, MYSQL_TYPE_LONG));
field_list.push_back(new Item_empty_string("Last_Error", 20));
field_list.push_back(new Item_return_int("Skip_Counter", 10,
- MYSQL_TYPE_LONG));
+ MYSQL_TYPE_LONG));
field_list.push_back(new Item_return_int("Exec_Master_Log_Pos", 10,
- MYSQL_TYPE_LONGLONG));
+ MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_return_int("Relay_Log_Space", 10,
- MYSQL_TYPE_LONGLONG));
+ MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("Until_Condition", 6));
field_list.push_back(new Item_empty_string("Until_Log_File", FN_REFLEN));
- field_list.push_back(new Item_return_int("Until_Log_Pos", 10,
+ field_list.push_back(new Item_return_int("Until_Log_Pos", 10,
MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("Master_SSL_Allowed", 7));
field_list.push_back(new Item_empty_string("Master_SSL_CA_File",
sizeof(mi->ssl_ca)));
- field_list.push_back(new Item_empty_string("Master_SSL_CA_Path",
+ field_list.push_back(new Item_empty_string("Master_SSL_CA_Path",
sizeof(mi->ssl_capath)));
- field_list.push_back(new Item_empty_string("Master_SSL_Cert",
+ field_list.push_back(new Item_empty_string("Master_SSL_Cert",
sizeof(mi->ssl_cert)));
- field_list.push_back(new Item_empty_string("Master_SSL_Cipher",
+ field_list.push_back(new Item_empty_string("Master_SSL_Cipher",
sizeof(mi->ssl_cipher)));
- field_list.push_back(new Item_empty_string("Master_SSL_Key",
+ field_list.push_back(new Item_empty_string("Master_SSL_Key",
sizeof(mi->ssl_key)));
field_list.push_back(new Item_return_int("Seconds_Behind_Master", 10,
MYSQL_TYPE_LONGLONG));
-
+
if (protocol->send_fields(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -2199,7 +2199,7 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
DBUG_PRINT("info",("host is set: '%s'", mi->host));
String *packet= &thd->packet;
protocol->prepare_for_resend();
-
+
/*
TODO: we read slave_running without run_lock, whereas these variables
are updated under run_lock and not data_lock. In 5.0 we should lock
@@ -2216,8 +2216,8 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
protocol->store(mi->master_log_name, &my_charset_bin);
protocol->store((ulonglong) mi->master_log_pos);
protocol->store(mi->rli.group_relay_log_name +
- dirname_length(mi->rli.group_relay_log_name),
- &my_charset_bin);
+ dirname_length(mi->rli.group_relay_log_name),
+ &my_charset_bin);
protocol->store((ulonglong) mi->rli.group_relay_log_pos);
protocol->store(mi->rli.group_master_log_name, &my_charset_bin);
protocol->store(mi->slave_running == MYSQL_SLAVE_RUN_CONNECT ?
@@ -2244,13 +2244,13 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
protocol->store((ulonglong) mi->rli.log_space_total);
protocol->store(
- mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_NONE ? "None":
+ mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_NONE ? "None":
( mi->rli.until_condition==RELAY_LOG_INFO::UNTIL_MASTER_POS? "Master":
"Relay"), &my_charset_bin);
protocol->store(mi->rli.until_log_name, &my_charset_bin);
protocol->store((ulonglong) mi->rli.until_log_pos);
-
-#ifdef HAVE_OPENSSL
+
+#ifdef HAVE_OPENSSL
protocol->store(mi->ssl? "Yes":"No", &my_charset_bin);
#else
protocol->store(mi->ssl? "Ignored":"No", &my_charset_bin);
@@ -2355,10 +2355,10 @@ int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache)
my_b_seek(file, 0L);
my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n",
- LINES_IN_MASTER_INFO_WITH_SSL,
+ LINES_IN_MASTER_INFO_WITH_SSL,
mi->master_log_name, llstr(mi->master_log_pos, lbuf),
- mi->host, mi->user,
- mi->password, mi->port, mi->connect_retry,
+ mi->host, mi->user,
+ mi->password, mi->port, mi->connect_retry,
(int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert,
mi->ssl_cipher, mi->ssl_key);
DBUG_RETURN(-flush_io_cache(file));
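flush_master_info() above rewrites master.info as one value per line, in the order of the format string. As a readability aid, here is a sketch that produces the same record with plain fprintf; every literal value is a made-up example, and 14u stands in for LINES_IN_MASTER_INFO_WITH_SSL.

#include <stdio.h>

/* Sketch only: mirrors the my_b_printf() call above, one value per line. */
static int write_master_info_example(FILE *f)
{
  return fprintf(f, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n",
                 14u,                      /* lines in file             */
                 "master-bin.000042",      /* master_log_name           */
                 "1547",                   /* master_log_pos (as text)  */
                 "master.example.com",     /* host                      */
                 "repl", "secret",         /* user, password            */
                 3306, 60,                 /* port, connect_retry       */
                 0,                        /* ssl on/off                */
                 "", "", "", "", "") < 0;  /* ca, capath, cert, cipher, key */
}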
@@ -2419,7 +2419,7 @@ st_relay_log_info::~st_relay_log_info()
wait_for_pos()
thd client thread that sent SELECT MASTER_POS_WAIT
log_name log name to wait for
- log_pos position to wait for
+ log_pos position to wait for
timeout timeout in seconds before giving up waiting
NOTES
@@ -2458,7 +2458,7 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
msg= thd->enter_cond(&data_cond, &data_lock,
"Waiting for the slave SQL thread to "
"advance position");
- /*
+ /*
This function will abort when it notices that some CHANGE MASTER or
RESET MASTER has changed the master info.
To catch this, these commands modify abort_pos_wait ; We just monitor
@@ -2504,7 +2504,7 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
{
error= -2;
goto err;
- }
+ }
/* The "compare and wait" main loop */
while (!thd->killed &&
@@ -2564,7 +2564,7 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
}
//wait for master update, with optional timeout.
-
+
DBUG_PRINT("info",("Waiting for master update"));
/*
We are going to pthread_cond_(timed)wait(); if the SQL thread stops it
@@ -2574,7 +2574,7 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
{
/*
Note that pthread_cond_timedwait checks for the timeout
- before for the condition ; i.e. it returns ETIMEDOUT
+ before for the condition ; i.e. it returns ETIMEDOUT
if the system time equals or exceeds the time specified by abstime
before the condition variable is signaled or broadcast, _or_ if
the absolute time specified by abstime has already passed at the time
@@ -2608,7 +2608,7 @@ improper_arguments: %d timed_out: %d",
(int) (error == -2),
(int) (error == -1)));
if (thd->killed || init_abort_pos_wait != abort_pos_wait ||
- !slave_running)
+ !slave_running)
{
error= -2;
}
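The pthread_cond_timedwait note in the hunk above (the deadline is checked even if it has already passed) is the crux of the MASTER_POS_WAIT loop. A self-contained sketch of a bounded wait with that behaviour; it uses an absolute CLOCK_REALTIME deadline and skips the enter_cond/exit_cond bookkeeping, so it is a simplification, not the server's implementation.

#include <pthread.h>
#include <time.h>
#include <errno.h>

/* Wait until *pos >= target or until 'timeout' seconds have elapsed.
   Returns 0 when the position was reached, -1 on timeout/error. */
static int wait_for_position(pthread_mutex_t *lock, pthread_cond_t *cond,
                             const unsigned long long *pos,
                             unsigned long long target, long timeout)
{
  struct timespec abstime;
  clock_gettime(CLOCK_REALTIME, &abstime);
  abstime.tv_sec+= timeout;                      /* absolute deadline */

  pthread_mutex_lock(lock);
  int err= 0;
  while (*pos < target && err == 0)              /* ETIMEDOUT is returned even if  */
    err= pthread_cond_timedwait(cond, lock, &abstime); /* the deadline already passed */
  int reached= (*pos >= target);
  pthread_mutex_unlock(lock);
  return reached ? 0 : -1;
}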
@@ -2648,13 +2648,13 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type)
{
DBUG_ENTER("init_slave_thread");
thd->system_thread = (thd_type == SLAVE_THD_SQL) ?
- SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO;
+ SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO;
thd->security_ctx->skip_grants();
my_net_init(&thd->net, 0);
thd->net.read_timeout = slave_net_timeout;
thd->slave_thread = 1;
set_slave_thread_options(thd);
- /*
+ /*
It's nonsense to constrain the slave threads with max_join_size; if a
query succeeded on master, we HAVE to execute it. So set
OPTION_BIG_SELECTS. Setting max_join_size to HA_POS_ERROR is not enough
@@ -2680,7 +2680,7 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type)
#if !defined(__WIN__) && !defined(__NETWARE__)
sigset_t set;
- VOID(sigemptyset(&set)); // Get mask in use
+ VOID(sigemptyset(&set)); // Get mask in use
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
#endif
@@ -2695,7 +2695,7 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type)
static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed,
- void* thread_killed_arg)
+ void* thread_killed_arg)
{
int nap_time;
thr_alarm_t alarmed;
@@ -2716,7 +2716,7 @@ static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed,
thr_alarm(&alarmed, 2 * nap_time, &alarm_buff);
sleep(nap_time);
thr_end_alarm(&alarmed);
-
+
if ((*thread_killed)(thd,thread_killed_arg))
DBUG_RETURN(1);
start_time=time((time_t*) 0);
@@ -2726,7 +2726,7 @@ static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed,
static int request_dump(MYSQL* mysql, MASTER_INFO* mi,
- bool *suppress_warnings)
+ bool *suppress_warnings)
{
char buf[FN_REFLEN + 10];
int len;
@@ -2748,11 +2748,11 @@ static int request_dump(MYSQL* mysql, MASTER_INFO* mi,
now we just fill up the error log :-)
*/
if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED)
- *suppress_warnings= 1; // Suppress reconnect warning
+ *suppress_warnings= 1; // Suppress reconnect warning
else
sql_print_error("Error on COM_BINLOG_DUMP: %d %s, will retry in %d secs",
- mysql_errno(mysql), mysql_error(mysql),
- master_connect_retry);
+ mysql_errno(mysql), mysql_error(mysql),
+ master_connect_retry);
DBUG_RETURN(1);
}
@@ -2772,14 +2772,14 @@ static int request_table_dump(MYSQL* mysql, const char* db, const char* table)
{
sql_print_error("request_table_dump: Buffer overrun");
DBUG_RETURN(1);
- }
-
+ }
+
*p++ = db_len;
memcpy(p, db, db_len);
p += db_len;
*p++ = table_len;
memcpy(p, table, table_len);
-
+
if (simple_command(mysql, COM_TABLE_DUMP, buf, p - buf + table_len, 1))
{
sql_print_error("request_table_dump: Error sending the table dump \
@@ -2793,19 +2793,19 @@ command");
/*
Read one event from the master
-
+
SYNOPSIS
read_event()
- mysql MySQL connection
- mi Master connection information
- suppress_warnings TRUE when a normal net read timeout has caused us to
- try a reconnect. We do not want to print anything to
- the error log in this case because this a anormal
- event in an idle server.
+ mysql MySQL connection
+ mi Master connection information
+ suppress_warnings TRUE when a normal net read timeout has caused us to
+ try a reconnect. We do not want to print anything to
+                          the error log in this case because this is a normal
+ event in an idle server.
RETURN VALUES
- 'packet_error' Error
- number Length of packet
+ 'packet_error' Error
+ number Length of packet
*/
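The comment above describes the caller's contract for read_event(), which is defined just below. The fragment that follows is a sketch of the typical call sequence used later in the I/O thread, with the error handling collapsed into comments; it is not new server code.

bool suppress_warnings= 0;
ulong event_len= read_event(mysql, mi, &suppress_warnings);
if (event_len == packet_error)
{
  /* Either a quiet read timeout (suppress_warnings set: just reconnect)
     or a real error that has already been written to the error log. */
}
else
{
  /* event_len bytes of a binlog event start at mysql->net.read_pos + 1;
     queue_event() appends them to the relay log. */
}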
static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings)
@@ -2820,24 +2820,24 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings)
*/
#ifndef DBUG_OFF
if (disconnect_slave_event_count && !(mi->events_till_disconnect--))
- DBUG_RETURN(packet_error);
+ DBUG_RETURN(packet_error);
#endif
-
+
len = net_safe_read(mysql);
if (len == packet_error || (long) len < 1)
{
if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED)
{
/*
- We are trying a normal reconnect after a read timeout;
- we suppress prints to .err file as long as the reconnect
- happens without problems
+ We are trying a normal reconnect after a read timeout;
+ we suppress prints to .err file as long as the reconnect
+ happens without problems
*/
*suppress_warnings= TRUE;
}
else
sql_print_error("Error reading packet from server: %s ( server_errno=%d)",
- mysql_error(mysql), mysql_errno(mysql));
+ mysql_error(mysql), mysql_errno(mysql));
DBUG_RETURN(packet_error);
}
@@ -2846,13 +2846,13 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings)
{
sql_print_information("Slave: received end packet from server, apparent "
"master shutdown: %s",
- mysql_error(mysql));
+ mysql_error(mysql));
DBUG_RETURN(packet_error);
}
-
+
DBUG_PRINT("info",( "len=%u, net->read_pos[4] = %d\n",
- len, mysql->net.read_pos[4]));
- DBUG_RETURN(len - 1);
+ len, mysql->net.read_pos[4]));
+ DBUG_RETURN(len - 1);
}
@@ -2862,8 +2862,8 @@ int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int expected_error)
switch (expected_error) {
case ER_NET_READ_ERROR:
- case ER_NET_ERROR_ON_WRITE:
- case ER_SERVER_SHUTDOWN:
+ case ER_NET_ERROR_ON_WRITE:
+ case ER_SERVER_SHUTDOWN:
case ER_NEW_ABORTING_CONNECTION:
DBUG_RETURN(1);
default:
@@ -2876,25 +2876,25 @@ int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int expected_error)
 SYNOPSIS
st_relay_log_info::is_until_satisfied()
DESCRIPTION
- Checks if UNTIL condition is reached. Uses caching result of last
- comparison of current log file name and target log file name. So cached
- value should be invalidated if current log file name changes
+ Checks if UNTIL condition is reached. Uses caching result of last
+ comparison of current log file name and target log file name. So cached
+ value should be invalidated if current log file name changes
(see st_relay_log_info::notify_... functions).
-
- This caching is needed to avoid of expensive string comparisons and
+
+    This caching is needed to avoid expensive string comparisons and
strtol() conversions needed for log names comparison. We don't need to
- compare them each time this function is called, we only need to do this
- when current log name changes. If we have UNTIL_MASTER_POS condition we
- need to do this only after Rotate_log_event::exec_event() (which is
- rare, so caching gives real benifit), and if we have UNTIL_RELAY_POS
- condition then we should invalidate cached comarison value after
+ compare them each time this function is called, we only need to do this
+ when current log name changes. If we have UNTIL_MASTER_POS condition we
+ need to do this only after Rotate_log_event::exec_event() (which is
+    rare, so caching gives real benefit), and if we have UNTIL_RELAY_POS
+    condition then we should invalidate cached comparison value after
 inc_group_relay_log_pos() which is called for each group of events (so we
- have some benefit if we have something like queries that use
+ have some benefit if we have something like queries that use
autoincrement or if we have transactions).
-
+
Should be called ONLY if until_condition != UNTIL_NONE !
RETURN VALUE
- true - condition met or error happened (condition seems to have
+ true - condition met or error happened (condition seems to have
bad log file name)
false - condition not met
*/
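The caching described above boils down to comparing a "<stem>.<number>" log name against the UNTIL target only when the current log name changes. A self-contained sketch of that comparison; the enum and helper below are illustrative stand-ins for the UNTIL_LOG_NAMES_CMP_* logic shown further down, and the real code also handles empty names and error reporting.

#include <string.h>
#include <stdlib.h>

enum cmp_result { CMP_UNKNOWN= -2, CMP_LESS= -1, CMP_EQUAL= 0, CMP_GREATER= 1 };

/* Compare two log names of the form "<stem>.<number>"; the result is
   meant to be cached and recomputed only when the current name changes. */
static cmp_result compare_log_names(const char *log_name, const char *until_name)
{
  const char *dot= strrchr(log_name, '.');
  const char *until_dot= strrchr(until_name, '.');
  if (!dot || !until_dot || (dot - log_name) != (until_dot - until_name) ||
      strncmp(log_name, until_name, (size_t) (dot - log_name)) != 0)
    return CMP_UNKNOWN;                   /* different stems: caller treats as error */

  unsigned long ext= strtoul(dot + 1, 0, 10);
  unsigned long until_ext= strtoul(until_dot + 1, 0, 10);
  if (ext < until_ext)  return CMP_LESS;
  if (ext > until_ext)  return CMP_GREATER;
  return CMP_EQUAL;
}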
@@ -2906,7 +2906,7 @@ bool st_relay_log_info::is_until_satisfied()
DBUG_ENTER("st_relay_log_info::is_until_satisfied");
DBUG_ASSERT(until_condition != UNTIL_NONE);
-
+
if (until_condition == UNTIL_MASTER_POS)
{
log_name= group_master_log_name;
@@ -2917,7 +2917,7 @@ bool st_relay_log_info::is_until_satisfied()
log_name= group_relay_log_name;
log_pos= group_relay_log_pos;
}
-
+
if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN)
{
/*
@@ -2931,7 +2931,7 @@ bool st_relay_log_info::is_until_satisfied()
if (*log_name)
{
const char *basename= log_name + dirname_length(log_name);
-
+
const char *q= (const char*)(fn_ext(basename)+1);
if (strncmp(basename, until_log_name, (int)(q-basename)) == 0)
{
@@ -2941,11 +2941,11 @@ bool st_relay_log_info::is_until_satisfied()
if (log_name_extension < until_log_name_extension)
until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_LESS;
else
- until_log_names_cmp_result=
- (log_name_extension > until_log_name_extension) ?
+ until_log_names_cmp_result=
+ (log_name_extension > until_log_name_extension) ?
UNTIL_LOG_NAMES_CMP_GREATER : UNTIL_LOG_NAMES_CMP_EQUAL ;
}
- else
+ else
{
 /* Probably an error, so we are aborting */
sql_print_error("Slave SQL thread is stopped because UNTIL "
@@ -2956,8 +2956,8 @@ bool st_relay_log_info::is_until_satisfied()
else
DBUG_RETURN(until_log_pos == 0);
}
-
- DBUG_RETURN(((until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_EQUAL &&
+
+ DBUG_RETURN(((until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_EQUAL &&
log_pos >= until_log_pos) ||
until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_GREATER));
}
@@ -3143,7 +3143,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
pthread_mutex_unlock(&rli->data_lock);
thd->server_id = ev->server_id; // use the original server id for logging
- thd->set_time(); // time the query
+ thd->set_time(); // time the query
thd->lex->current_select= 0;
if (!ev->when)
ev->when = time(NULL);
@@ -3192,16 +3192,16 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
{
exec_res= 0;
end_trans(thd, ROLLBACK);
- /* chance for concurrent connection to get more locks */
+ /* chance for concurrent connection to get more locks */
safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE),
- (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli);
+ (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli);
pthread_mutex_lock(&rli->data_lock); // because of SHOW STATUS
- rli->trans_retries++;
+ rli->trans_retries++;
rli->retried_trans++;
pthread_mutex_unlock(&rli->data_lock);
DBUG_PRINT("info", ("Slave retries transaction "
"rli->trans_retries: %lu", rli->trans_retries));
- }
+ }
}
else
sql_print_error("Slave SQL thread retried transaction %lu time(s) "
@@ -3285,8 +3285,8 @@ pthread_handler_t handle_slave_io(void *arg)
pthread_cond_broadcast(&mi->start_cond);
DBUG_PRINT("master_info",("log_file_name: '%s' position: %s",
- mi->master_log_name,
- llstr(mi->master_log_pos,llbuff)));
+ mi->master_log_name,
+ llstr(mi->master_log_pos,llbuff)));
if (!(mi->mysql = mysql = mysql_init(NULL)))
{
@@ -3299,9 +3299,9 @@ pthread_handler_t handle_slave_io(void *arg)
if (!safe_connect(thd, mysql, mi))
sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\
replication started in log '%s' at position %s", mi->user,
- mi->host, mi->port,
- IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos,llbuff));
+ mi->host, mi->port,
+ IO_RPL_LOG_NAME,
+ llstr(mi->master_log_pos,llbuff));
else
{
sql_print_information("Slave I/O thread killed while connecting to master");
@@ -3339,9 +3339,9 @@ connected:
sql_print_error("Failed on request_dump()");
if (io_slave_killed(thd,mi))
{
- sql_print_information("Slave I/O thread killed while requesting master \
+ sql_print_information("Slave I/O thread killed while requesting master \
dump");
- goto err;
+ goto err;
}
mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT;
@@ -3351,35 +3351,35 @@ dump");
#endif
end_server(mysql);
/*
- First time retry immediately, assuming that we can recover
- right away - if first time fails, sleep between re-tries
- hopefuly the admin can fix the problem sometime
+ First time retry immediately, assuming that we can recover
+ right away - if first time fails, sleep between re-tries
+      hopefully the admin can fix the problem sometime
*/
if (retry_count++)
{
- if (retry_count > master_retry_count)
- goto err; // Don't retry forever
- safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
- (void*)mi);
+ if (retry_count > master_retry_count)
+ goto err; // Don't retry forever
+ safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
+ (void*)mi);
}
if (io_slave_killed(thd,mi))
{
- sql_print_information("Slave I/O thread killed while retrying master \
+ sql_print_information("Slave I/O thread killed while retrying master \
dump");
- goto err;
+ goto err;
}
thd->proc_info = "Reconnecting after a failed binlog dump request";
if (!suppress_warnings)
- sql_print_error("Slave I/O thread: failed dump request, \
+ sql_print_error("Slave I/O thread: failed dump request, \
 reconnecting to try again, log '%s' at position %s", IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos,llbuff));
+ llstr(mi->master_log_pos,llbuff));
if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
- io_slave_killed(thd,mi))
+ io_slave_killed(thd,mi))
{
- sql_print_information("Slave I/O thread killed during or \
+ sql_print_information("Slave I/O thread killed during or \
after reconnect");
- goto err;
+ goto err;
}
goto connected;
@@ -3398,72 +3398,72 @@ after reconnect");
ulong event_len = read_event(mysql, mi, &suppress_warnings);
if (io_slave_killed(thd,mi))
{
- if (global_system_variables.log_warnings)
- sql_print_information("Slave I/O thread killed while reading event");
- goto err;
+ if (global_system_variables.log_warnings)
+ sql_print_information("Slave I/O thread killed while reading event");
+ goto err;
}
if (event_len == packet_error)
{
- uint mysql_error_number= mysql_errno(mysql);
- if (mysql_error_number == ER_NET_PACKET_TOO_LARGE)
- {
- sql_print_error("\
+ uint mysql_error_number= mysql_errno(mysql);
+ if (mysql_error_number == ER_NET_PACKET_TOO_LARGE)
+ {
+ sql_print_error("\
Log entry on master is longer than max_allowed_packet (%ld) on \
slave. If the entry is correct, restart the server with a higher value of \
max_allowed_packet",
- thd->variables.max_allowed_packet);
- goto err;
- }
- if (mysql_error_number == ER_MASTER_FATAL_ERROR_READING_BINLOG)
- {
- sql_print_error(ER(mysql_error_number), mysql_error_number,
- mysql_error(mysql));
- goto err;
- }
+ thd->variables.max_allowed_packet);
+ goto err;
+ }
+ if (mysql_error_number == ER_MASTER_FATAL_ERROR_READING_BINLOG)
+ {
+ sql_print_error(ER(mysql_error_number), mysql_error_number,
+ mysql_error(mysql));
+ goto err;
+ }
mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT;
- thd->proc_info = "Waiting to reconnect after a failed master event read";
+ thd->proc_info = "Waiting to reconnect after a failed master event read";
#ifdef SIGNAL_WITH_VIO_CLOSE
thd->clear_active_vio();
#endif
- end_server(mysql);
- if (retry_count++)
- {
- if (retry_count > master_retry_count)
- goto err; // Don't retry forever
- safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
- (void*) mi);
- }
- if (io_slave_killed(thd,mi))
- {
- if (global_system_variables.log_warnings)
- sql_print_information("Slave I/O thread killed while waiting to \
+ end_server(mysql);
+ if (retry_count++)
+ {
+ if (retry_count > master_retry_count)
+ goto err; // Don't retry forever
+ safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
+ (void*) mi);
+ }
+ if (io_slave_killed(thd,mi))
+ {
+ if (global_system_variables.log_warnings)
+ sql_print_information("Slave I/O thread killed while waiting to \
reconnect after a failed read");
- goto err;
- }
- thd->proc_info = "Reconnecting after a failed master event read";
- if (!suppress_warnings)
- sql_print_information("Slave I/O thread: Failed reading log event, \
+ goto err;
+ }
+ thd->proc_info = "Reconnecting after a failed master event read";
+ if (!suppress_warnings)
+ sql_print_information("Slave I/O thread: Failed reading log event, \
reconnecting to retry, log '%s' position %s", IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos, llbuff));
- if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
- io_slave_killed(thd,mi))
- {
- if (global_system_variables.log_warnings)
- sql_print_information("Slave I/O thread killed during or after a \
+ llstr(mi->master_log_pos, llbuff));
+ if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
+ io_slave_killed(thd,mi))
+ {
+ if (global_system_variables.log_warnings)
+ sql_print_information("Slave I/O thread killed during or after a \
reconnect done to recover from failed read");
- goto err;
- }
- goto connected;
+ goto err;
+ }
+ goto connected;
} // if (event_len == packet_error)
- retry_count=0; // ok event, reset retry counter
+ retry_count=0; // ok event, reset retry counter
thd->proc_info = "Queueing master event to the relay log";
if (queue_event(mi,(const char*)mysql->net.read_pos + 1,
- event_len))
+ event_len))
{
- sql_print_error("Slave I/O thread could not queue event from master");
- goto err;
+ sql_print_error("Slave I/O thread could not queue event from master");
+ goto err;
}
if (flush_master_info(mi, 1))
{
@@ -3489,27 +3489,27 @@ reconnect done to recover from failed read");
ignore_log_space_limit=%d",
llstr(rli->log_space_limit,llbuf1),
llstr(rli->log_space_total,llbuf2),
- (int) rli->ignore_log_space_limit));
+ (int) rli->ignore_log_space_limit));
}
#endif
if (rli->log_space_limit && rli->log_space_limit <
- rli->log_space_total &&
+ rli->log_space_total &&
!rli->ignore_log_space_limit)
- if (wait_for_relay_log_space(rli))
- {
- sql_print_error("Slave I/O thread aborted while waiting for relay \
+ if (wait_for_relay_log_space(rli))
+ {
+ sql_print_error("Slave I/O thread aborted while waiting for relay \
log space");
- goto err;
- }
- }
+ goto err;
+ }
+ }
}
// error = 0;
err:
// print the current replication position
sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s",
- IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
+ IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
VOID(pthread_mutex_lock(&LOCK_thread_count));
thd->query = thd->db = 0; // extra safety
thd->query_length= thd->db_length= 0;
@@ -3533,15 +3533,12 @@ err:
write_ignored_events_info_to_relay_log(thd, mi);
thd->proc_info = "Waiting for slave mutex on exit";
pthread_mutex_lock(&mi->run_lock);
- mi->slave_running = 0;
- mi->io_thd = 0;
/* Forget the relay log's format */
delete mi->rli.relay_log.description_event_for_queue;
mi->rli.relay_log.description_event_for_queue= 0;
// TODO: make rpl_status part of MASTER_INFO
change_rpl_status(RPL_ACTIVE_SLAVE,RPL_IDLE_SLAVE);
- mi->abort_slave = 0; // TODO: check if this is needed
DBUG_ASSERT(thd->net.buff != 0);
net_end(&thd->net); // destructor will not free it, because net.vio is 0
close_thread_tables(thd, 0);
@@ -3549,11 +3546,14 @@ err:
THD_CHECK_SENTRY(thd);
delete thd;
pthread_mutex_unlock(&LOCK_thread_count);
- pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done
+ mi->abort_slave = 0;
+ mi->slave_running = 0;
+ mi->io_thd = 0;
pthread_mutex_unlock(&mi->run_lock);
+ pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done
my_thread_end();
pthread_exit(0);
- DBUG_RETURN(0); // Can't return anything here
+ DBUG_RETURN(0); // Can't return anything here
}
@@ -3561,7 +3561,7 @@ err:
pthread_handler_t handle_slave_sql(void *arg)
{
- THD *thd; /* needs to be first for thread_stack */
+ THD *thd; /* needs to be first for thread_stack */
char llbuff[22],llbuff1[22];
RELAY_LOG_INFO* rli = &((MASTER_INFO*)arg)->rli;
@@ -3575,13 +3575,13 @@ pthread_handler_t handle_slave_sql(void *arg)
pthread_mutex_lock(&rli->run_lock);
DBUG_ASSERT(!rli->slave_running);
errmsg= 0;
-#ifndef DBUG_OFF
+#ifndef DBUG_OFF
rli->events_till_abort = abort_slave_event_count;
-#endif
+#endif
 thd = new THD; // note that constructor of THD uses DBUG_ !
thd->thread_stack = (char*)&thd; // remember where our stack is
-
+
/* Inform waiting threads that slave has started */
rli->slave_run_id++;
@@ -3635,13 +3635,13 @@ pthread_handler_t handle_slave_sql(void *arg)
rli->trans_retries= 0; // start from "no error"
if (init_relay_log_pos(rli,
- rli->group_relay_log_name,
- rli->group_relay_log_pos,
- 1 /*need data lock*/, &errmsg,
+ rli->group_relay_log_name,
+ rli->group_relay_log_pos,
+ 1 /*need data lock*/, &errmsg,
1 /*look for a description_event*/))
{
sql_print_error("Error initializing relay log position: %s",
- errmsg);
+ errmsg);
goto err;
}
THD_CHECK_SENTRY(thd);
@@ -3649,7 +3649,7 @@ pthread_handler_t handle_slave_sql(void *arg)
{
char llbuf1[22], llbuf2[22];
DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s",
- llstr(my_b_tell(rli->cur_log),llbuf1),
+ llstr(my_b_tell(rli->cur_log),llbuf1),
llstr(rli->event_relay_log_pos,llbuf2)));
DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE);
/*
@@ -3672,13 +3672,13 @@ pthread_handler_t handle_slave_sql(void *arg)
DBUG_ASSERT(rli->sql_thd == thd);
DBUG_PRINT("master_info",("log_file_name: %s position: %s",
- rli->group_master_log_name,
- llstr(rli->group_master_log_pos,llbuff)));
+ rli->group_master_log_name,
+ llstr(rli->group_master_log_pos,llbuff)));
if (global_system_variables.log_warnings)
sql_print_information("Slave SQL thread initialized, starting replication in \
log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
- llstr(rli->group_master_log_pos,llbuff),rli->group_relay_log_name,
- llstr(rli->group_relay_log_pos,llbuff1));
+ llstr(rli->group_master_log_pos,llbuff),rli->group_relay_log_name,
+ llstr(rli->group_relay_log_pos,llbuff1));
/* execute init_slave variable */
if (sys_init_slave.value_length)
@@ -3742,8 +3742,8 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
/* Thread stopped. Print the current replication position to the log */
sql_print_information("Slave SQL thread exiting, replication stopped in log "
- "'%s' at position %s",
- RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff));
+ "'%s' at position %s",
+ RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff));
err:
@@ -3760,7 +3760,7 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
should already have done these assignments (each event which sets these
variables is supposed to set them to 0 before terminating)).
*/
- thd->query= thd->db= thd->catalog= 0;
+ thd->query= thd->db= thd->catalog= 0;
thd->query_length= thd->db_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
thd->proc_info = "Waiting for slave mutex on exit";
@@ -3769,7 +3769,7 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
pthread_mutex_lock(&rli->data_lock);
DBUG_ASSERT(rli->slave_running == 1); // tracking buffer overrun
/* When master_pos_wait() wakes up it will check this and terminate */
- rli->slave_running= 0;
+ rli->slave_running= 0;
/* Forget the relay log's format */
delete rli->relay_log.description_event_for_exec;
rli->relay_log.description_event_for_exec= 0;
@@ -3801,7 +3801,7 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
pthread_mutex_unlock(&rli->run_lock);
my_thread_end();
pthread_exit(0);
- DBUG_RETURN(0); // Can't return anything here
+ DBUG_RETURN(0); // Can't return anything here
}
@@ -3830,11 +3830,11 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev)
thd->file_id = cev->file_id = mi->file_id++;
thd->server_id = cev->server_id;
cev_not_written = 1;
-
+
if (unlikely(net_request_file(net,cev->fname)))
{
sql_print_error("Slave I/O: failed requesting download of '%s'",
- cev->fname);
+ cev->fname);
goto err;
}
@@ -3845,18 +3845,18 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev)
*/
{
Append_block_log_event aev(thd,0,0,0,0);
-
+
for (;;)
{
if (unlikely((num_bytes=my_net_read(net)) == packet_error))
{
- sql_print_error("Network read error downloading '%s' from master",
- cev->fname);
- goto err;
+ sql_print_error("Network read error downloading '%s' from master",
+ cev->fname);
+ goto err;
}
if (unlikely(!num_bytes)) /* eof */
{
- net_write_command(net, 0, "", 0, "", 0);/* 3.23 master wants it */
+ net_write_command(net, 0, "", 0, "", 0);/* 3.23 master wants it */
/*
If we wrote Create_file_log_event, then we need to write
Execute_load_log_event. If we did not write Create_file_log_event,
@@ -3864,43 +3864,43 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev)
INFILE had not existed, i.e. write nothing.
*/
if (unlikely(cev_not_written))
- break;
- Execute_load_log_event xev(thd,0,0);
- xev.log_pos = cev->log_pos;
- if (unlikely(mi->rli.relay_log.append(&xev)))
- {
- sql_print_error("Slave I/O: error writing Exec_load event to \
+ break;
+ Execute_load_log_event xev(thd,0,0);
+ xev.log_pos = cev->log_pos;
+ if (unlikely(mi->rli.relay_log.append(&xev)))
+ {
+ sql_print_error("Slave I/O: error writing Exec_load event to \
relay log");
- goto err;
- }
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
- break;
+ goto err;
+ }
+ mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
+ break;
}
if (unlikely(cev_not_written))
{
- cev->block = (char*)net->read_pos;
- cev->block_len = num_bytes;
- if (unlikely(mi->rli.relay_log.append(cev)))
- {
- sql_print_error("Slave I/O: error writing Create_file event to \
+ cev->block = (char*)net->read_pos;
+ cev->block_len = num_bytes;
+ if (unlikely(mi->rli.relay_log.append(cev)))
+ {
+ sql_print_error("Slave I/O: error writing Create_file event to \
relay log");
- goto err;
- }
- cev_not_written=0;
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
+ goto err;
+ }
+ cev_not_written=0;
+ mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
}
else
{
- aev.block = (char*)net->read_pos;
- aev.block_len = num_bytes;
- aev.log_pos = cev->log_pos;
- if (unlikely(mi->rli.relay_log.append(&aev)))
- {
- sql_print_error("Slave I/O: error writing Append_block event to \
+ aev.block = (char*)net->read_pos;
+ aev.block_len = num_bytes;
+ aev.log_pos = cev->log_pos;
+ if (unlikely(mi->rli.relay_log.append(&aev)))
+ {
+ sql_print_error("Slave I/O: error writing Append_block event to \
relay log");
- goto err;
- }
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ;
+ goto err;
+ }
+ mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ;
}
}
}
@@ -3915,8 +3915,8 @@ err:
SYNOPSIS
process_io_rotate()
- mi master_info for the slave
- rev The rotate log event read from the binary log
+ mi master_info for the slave
+ rev The rotate log event read from the binary log
DESCRIPTION
Updates the master info with the place in the next binary
@@ -3927,8 +3927,8 @@ err:
We assume we already locked mi->data_lock
RETURN VALUES
- 0 ok
- 1 Log event is illegal
+ 0 ok
+ 1 Log event is illegal
*/
@@ -3944,7 +3944,7 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev)
memcpy(mi->master_log_name, rev->new_log_ident, rev->ident_len+1);
mi->master_log_pos= rev->pos;
DBUG_PRINT("info", ("master_log_pos: '%s' %d",
- mi->master_log_name, (ulong) mi->master_log_pos));
+ mi->master_log_name, (ulong) mi->master_log_pos));
#ifndef DBUG_OFF
/*
If we do not do this, we will be getting the first
@@ -3981,7 +3981,7 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev)
copied from MySQL 4.0.
*/
static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf,
- ulong event_len)
+ ulong event_len)
{
const char *errmsg = 0;
ulong inc_pos;
@@ -4025,7 +4025,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf,
{
sql_print_error("Read invalid event from master: '%s',\
master could be corrupt but a more likely cause of this is a bug",
- errmsg);
+ errmsg);
my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR));
DBUG_RETURN(1);
}
@@ -4071,8 +4071,8 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf,
}
if (likely(!ignore_event))
{
- if (ev->log_pos)
- /*
+ if (ev->log_pos)
+ /*
Don't do it for fake Rotate events (see comment in
Log_event::Log_event(const char* buf...) in log_event.cc).
*/
@@ -4097,7 +4097,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf,
from queue_binlog_ver_1_event(), with some affordable simplifications.
*/
static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf,
- ulong event_len)
+ ulong event_len)
{
const char *errmsg = 0;
ulong inc_pos;
@@ -4112,7 +4112,7 @@ static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf,
{
sql_print_error("Read invalid event from master: '%s',\
master could be corrupt but a more likely cause of this is a bug",
- errmsg);
+ errmsg);
my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR));
DBUG_RETURN(1);
}
@@ -4155,13 +4155,13 @@ err:
(exactly, slave's) format. To do the conversion, we create a 5.0 event from
the 3.23/4.0 bytes, then write this event to the relay log.
- TODO:
+ TODO:
Test this code before release - it has to be tested on a separate
setup with 3.23 master or 4.0 master
*/
static int queue_old_event(MASTER_INFO *mi, const char *buf,
- ulong event_len)
+ ulong event_len)
{
DBUG_ENTER("queue_old_event");
@@ -4173,7 +4173,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf,
DBUG_RETURN(queue_binlog_ver_3_event(mi,buf,event_len));
default: /* unsupported format; eg version 2 */
DBUG_PRINT("info",("unsupported binlog format %d in queue_old_event()",
- mi->rli.relay_log.description_event_for_queue->binlog_version));
+ mi->rli.relay_log.description_event_for_queue->binlog_version));
DBUG_RETURN(1);
}
}
@@ -4211,7 +4211,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
cleaning is already done on a per-master-thread basis (as the master
server is shutting down cleanly, it has written all DROP TEMPORARY TABLE
prepared statements' deletion are TODO only when we binlog prep stmts).
-
+
We don't even increment mi->master_log_pos, because we may be just after
a Rotate event. Btw, in a few milliseconds we are going to have a Start
event from the next binlog (unless the master is presently running
@@ -4220,7 +4220,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
goto err;
case ROTATE_EVENT:
{
- Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue);
+ Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue);
if (unlikely(process_io_rotate(mi,&rev)))
{
error= 1;
@@ -4255,17 +4255,17 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
}
delete mi->rli.relay_log.description_event_for_queue;
mi->rli.relay_log.description_event_for_queue= tmp;
- /*
+ /*
Though this does some conversion to the slave's format, this will
- preserve the master's binlog format version, and number of event types.
+ preserve the master's binlog format version, and number of event types.
*/
- /*
+ /*
If the event was not requested by the slave (the slave did not ask for
- it), i.e. has end_log_pos=0, we do not increment mi->master_log_pos
+ it), i.e. has end_log_pos=0, we do not increment mi->master_log_pos
*/
inc_pos= uint4korr(buf+LOG_POS_OFFSET) ? event_len : 0;
DBUG_PRINT("info",("binlog format is now %d",
- mi->rli.relay_log.description_event_for_queue->binlog_version));
+ mi->rli.relay_log.description_event_for_queue->binlog_version));
}
break;
@@ -4274,8 +4274,8 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
break;
}
- /*
- If this event is originating from this server, don't queue it.
+ /*
+ If this event is originating from this server, don't queue it.
We don't check this for 3.23 events because it's simpler like this; 3.23
will be filtered anyway by the SQL slave thread which also tests the
server id (we must also keep this test in the SQL thread, in case somebody
@@ -4317,7 +4317,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
}
rli->relay_log.signal_update(); // the slave SQL thread needs to re-check
DBUG_PRINT("info", ("master_log_pos: %d, event originating from the same server, ignored", (ulong) mi->master_log_pos));
- }
+ }
else
{
/* write the event to the relay log */
@@ -4376,13 +4376,13 @@ void end_relay_log_info(RELAY_LOG_INFO* rli)
 SYNOPSIS
safe_connect()
- thd Thread handler for slave
- mysql MySQL connection handle
- mi Replication handle
+ thd Thread handler for slave
+ mysql MySQL connection handle
+ mi Replication handle
RETURN
- 0 ok
- # Error
+ 0 ok
+ # Error
*/
static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi)
@@ -4403,10 +4403,10 @@ static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi)
*/
static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
- bool reconnect, bool suppress_warnings)
+ bool reconnect, bool suppress_warnings)
{
int slave_was_killed;
- int last_errno= -2; // impossible error
+ int last_errno= -2; // impossible error
ulong err_count=0;
char llbuff[22];
DBUG_ENTER("connect_to_master");
@@ -4416,16 +4416,16 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
#endif
ulong client_flag= CLIENT_REMEMBER_OPTIONS;
if (opt_slave_compressed_protocol)
- client_flag=CLIENT_COMPRESS; /* We will use compression */
+ client_flag=CLIENT_COMPRESS; /* We will use compression */
mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout);
mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout);
-
+
#ifdef HAVE_OPENSSL
if (mi->ssl)
- mysql_ssl_set(mysql,
+ mysql_ssl_set(mysql,
mi->ssl_key[0]?mi->ssl_key:0,
- mi->ssl_cert[0]?mi->ssl_cert:0,
+ mi->ssl_cert[0]?mi->ssl_cert:0,
mi->ssl_ca[0]?mi->ssl_ca:0,
mi->ssl_capath[0]?mi->ssl_capath:0,
mi->ssl_cipher[0]?mi->ssl_cipher:0);
@@ -4436,9 +4436,9 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
mysql_options(mysql, MYSQL_SET_CHARSET_DIR, (char *) charsets_dir);
while (!(slave_was_killed = io_slave_killed(thd,mi)) &&
- (reconnect ? mysql_reconnect(mysql) != 0 :
- mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0,
- mi->port, 0, client_flag) == 0))
+ (reconnect ? mysql_reconnect(mysql) != 0 :
+ mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0,
+ mi->port, 0, client_flag) == 0))
{
/* Don't repeat last error */
if ((int)mysql_errno(mysql) != last_errno)
@@ -4448,11 +4448,11 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
sql_print_error("Slave I/O thread: error %s to master \
'%s@%s:%d': \
Error: '%s' errno: %d retry-time: %d retries: %d",
- (reconnect ? "reconnecting" : "connecting"),
- mi->user,mi->host,mi->port,
- mysql_error(mysql), last_errno,
- mi->connect_retry,
- master_retry_count);
+ (reconnect ? "reconnecting" : "connecting"),
+ mi->user,mi->host,mi->port,
+ mysql_error(mysql), last_errno,
+ mi->connect_retry,
+ master_retry_count);
}
/*
By default we try forever. The reason is that failure will trigger
@@ -4468,19 +4468,19 @@ Error: '%s' errno: %d retry-time: %d retries: %d",
break;
}
safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed,
- (void*)mi);
+ (void*)mi);
}
if (!slave_was_killed)
{
if (reconnect)
- {
+ {
if (!suppress_warnings && global_system_variables.log_warnings)
- sql_print_information("Slave: connected to master '%s@%s:%d',\
+ sql_print_information("Slave: connected to master '%s@%s:%d',\
replication resumed in log '%s' at position %s", mi->user,
- mi->host, mi->port,
- IO_RPL_LOG_NAME,
- llstr(mi->master_log_pos,llbuff));
+ mi->host, mi->port,
+ IO_RPL_LOG_NAME,
+ llstr(mi->master_log_pos,llbuff));
}
else
{
@@ -4490,7 +4490,7 @@ replication resumed in log '%s' at position %s", mi->user,
}
#ifdef SIGNAL_WITH_VIO_CLOSE
thd->set_active_vio(mysql->net.vio);
-#endif
+#endif
}
mysql->reconnect= 1;
DBUG_PRINT("exit",("slave_was_killed: %d", slave_was_killed));
@@ -4507,7 +4507,7 @@ replication resumed in log '%s' at position %s", mi->user,
*/
static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
- bool suppress_warnings)
+ bool suppress_warnings)
{
DBUG_ENTER("safe_reconnect");
DBUG_RETURN(connect_to_master(thd, mysql, mi, 1, suppress_warnings));
@@ -4520,7 +4520,7 @@ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
SYNOPSIS
flush_relay_log_info()
- rli Relay log information
+ rli Relay log information
NOTES
- As this is only called by the slave thread, we don't need to
@@ -4539,8 +4539,8 @@ static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
longlong2str.
RETURN VALUES
- 0 ok
- 1 write error
+ 0 ok
+ 1 write error
*/
bool flush_relay_log_info(RELAY_LOG_INFO* rli)
@@ -4585,12 +4585,12 @@ static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg)
IO_CACHE *cur_log = rli->cur_log=&rli->cache_buf;
if ((rli->cur_log_fd=open_binlog(cur_log,rli->event_relay_log_name,
- errmsg)) <0)
+ errmsg)) <0)
DBUG_RETURN(0);
/*
 We want to start exactly where we were before:
- relay_log_pos Current log pos
- pending Number of bytes already processed from the event
+ relay_log_pos Current log pos
+ pending Number of bytes already processed from the event
*/
rli->event_relay_log_pos= max(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE);
my_b_seek(cur_log,rli->event_relay_log_pos);
@@ -4622,7 +4622,7 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
pthread_cond_wait() with the non-data_lock mutex
*/
safe_mutex_assert_owner(&rli->data_lock);
-
+
while (!sql_slave_killed(thd,rli))
{
/*
@@ -4643,17 +4643,17 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
pthread_mutex_lock(log_lock);
/*
- Reading xxx_file_id is safe because the log will only
- be rotated when we hold relay_log.LOCK_log
+ Reading xxx_file_id is safe because the log will only
+ be rotated when we hold relay_log.LOCK_log
*/
if (rli->relay_log.get_open_count() != rli->cur_log_old_open_count)
{
- // The master has switched to a new log file; Reopen the old log file
- cur_log=reopen_relay_log(rli, &errmsg);
- pthread_mutex_unlock(log_lock);
- if (!cur_log) // No more log files
- goto err;
- hot_log=0; // Using old binary log
+ // The master has switched to a new log file; Reopen the old log file
+ cur_log=reopen_relay_log(rli, &errmsg);
+ pthread_mutex_unlock(log_lock);
+ if (!cur_log) // No more log files
+ goto err;
+ hot_log=0; // Using old binary log
}
}
@@ -4691,25 +4691,25 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
*/
rli->future_event_relay_log_pos= my_b_tell(cur_log);
if (hot_log)
- pthread_mutex_unlock(log_lock);
+ pthread_mutex_unlock(log_lock);
DBUG_RETURN(ev);
}
DBUG_ASSERT(thd==rli->sql_thd);
- if (opt_reckless_slave) // For mysql-test
+ if (opt_reckless_slave) // For mysql-test
cur_log->error = 0;
if (cur_log->error < 0)
{
errmsg = "slave SQL thread aborted because of I/O error";
if (hot_log)
- pthread_mutex_unlock(log_lock);
+ pthread_mutex_unlock(log_lock);
goto err;
}
if (!cur_log->error) /* EOF */
{
/*
- On a hot log, EOF means that there are no more updates to
- process and we must block until I/O thread adds some and
- signals us to continue
+ On a hot log, EOF means that there are no more updates to
+ process and we must block until I/O thread adds some and
+ signals us to continue
*/
if (hot_log)
{
@@ -4728,7 +4728,7 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
time_t save_timestamp= rli->last_master_timestamp;
rli->last_master_timestamp= 0;
- DBUG_ASSERT(rli->relay_log.get_open_count() ==
+ DBUG_ASSERT(rli->relay_log.get_open_count() ==
rli->cur_log_old_open_count);
if (rli->ign_master_log_name_end[0])
@@ -4750,14 +4750,14 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
DBUG_RETURN(ev);
}
- /*
- We can, and should release data_lock while we are waiting for
- update. If we do not, show slave status will block
- */
- pthread_mutex_unlock(&rli->data_lock);
+ /*
+ We can, and should release data_lock while we are waiting for
+ update. If we do not, show slave status will block
+ */
+ pthread_mutex_unlock(&rli->data_lock);
/*
- Possible deadlock :
+ Possible deadlock :
- the I/O thread has reached log_space_limit
- the SQL thread has read all relay logs, but cannot purge for some
reason:
@@ -4769,10 +4769,10 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
the I/O thread to temporarily ignore the log_space_limit
constraint, because we do not want the I/O thread to block because of
space (it's ok if it blocks for any other reason (e.g. because the
- master does not send anything). Then the I/O thread stops waiting
+ master does not send anything). Then the I/O thread stops waiting
and reads more events.
The SQL thread decides when the I/O thread should take log_space_limit
- into account again : ignore_log_space_limit is reset to 0
+ into account again : ignore_log_space_limit is reset to 0
in purge_first_log (when the SQL thread purges the just-read relay
log), and also when the SQL thread starts. We should also reset
ignore_log_space_limit to 0 when the user does RESET SLAVE, but in
@@ -4782,7 +4782,7 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
*/
pthread_mutex_lock(&rli->log_space_lock);
// prevent the I/O thread from blocking next times
- rli->ignore_log_space_limit= 1;
+ rli->ignore_log_space_limit= 1;
/*
If the I/O thread is blocked, unblock it.
Ok to broadcast after unlock, because the mutex is only destroyed in
@@ -4796,21 +4796,21 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
// re-acquire data lock since we released it earlier
pthread_mutex_lock(&rli->data_lock);
rli->last_master_timestamp= save_timestamp;
- continue;
+ continue;
}
/*
- If the log was not hot, we need to move to the next log in
- sequence. The next log could be hot or cold, we deal with both
- cases separately after doing some common initialization
+ If the log was not hot, we need to move to the next log in
+ sequence. The next log could be hot or cold, we deal with both
+ cases separately after doing some common initialization
*/
end_io_cache(cur_log);
DBUG_ASSERT(rli->cur_log_fd >= 0);
my_close(rli->cur_log_fd, MYF(MY_WME));
rli->cur_log_fd = -1;
-
+
if (relay_log_purge)
{
- /*
+ /*
purge_first_log will properly set up relay log coordinates in rli.
If the group's coordinates are equal to the event's coordinates
(i.e. the relay log was not rotated in the middle of a group),
@@ -4821,33 +4821,33 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
- I see no better detection method
- purge_first_log is not called that often
*/
- if (rli->relay_log.purge_first_log
+ if (rli->relay_log.purge_first_log
(rli,
rli->group_relay_log_pos == rli->event_relay_log_pos
&& !strcmp(rli->group_relay_log_name,rli->event_relay_log_name)))
- {
- errmsg = "Error purging processed logs";
- goto err;
- }
+ {
+ errmsg = "Error purging processed logs";
+ goto err;
+ }
}
else
{
- /*
- If hot_log is set, then we already have a lock on
- LOCK_log. If not, we have to get the lock.
-
- According to Sasha, the only time this code will ever be executed
- is if we are recovering from a bug.
- */
- if (rli->relay_log.find_next_log(&rli->linfo, !hot_log))
- {
- errmsg = "error switching to the next log";
- goto err;
- }
- rli->event_relay_log_pos = BIN_LOG_HEADER_SIZE;
- strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
- sizeof(rli->event_relay_log_name)-1);
- flush_relay_log_info(rli);
+ /*
+ If hot_log is set, then we already have a lock on
+ LOCK_log. If not, we have to get the lock.
+
+ According to Sasha, the only time this code will ever be executed
+ is if we are recovering from a bug.
+ */
+ if (rli->relay_log.find_next_log(&rli->linfo, !hot_log))
+ {
+ errmsg = "error switching to the next log";
+ goto err;
+ }
+ rli->event_relay_log_pos = BIN_LOG_HEADER_SIZE;
+ strmake(rli->event_relay_log_name,rli->linfo.log_file_name,
+ sizeof(rli->event_relay_log_name)-1);
+ flush_relay_log_info(rli);
}
/*
@@ -4866,66 +4866,66 @@ static Log_event* next_event(RELAY_LOG_INFO* rli)
if (rli->relay_log.is_active(rli->linfo.log_file_name))
{
#ifdef EXTRA_DEBUG
- if (global_system_variables.log_warnings)
- sql_print_information("next log '%s' is currently active",
+ if (global_system_variables.log_warnings)
+ sql_print_information("next log '%s' is currently active",
rli->linfo.log_file_name);
-#endif
- rli->cur_log= cur_log= rli->relay_log.get_log_file();
- rli->cur_log_old_open_count= rli->relay_log.get_open_count();
- DBUG_ASSERT(rli->cur_log_fd == -1);
-
- /*
- Read pointer has to be at the start since we are the only
- reader.
+#endif
+ rli->cur_log= cur_log= rli->relay_log.get_log_file();
+ rli->cur_log_old_open_count= rli->relay_log.get_open_count();
+ DBUG_ASSERT(rli->cur_log_fd == -1);
+
+ /*
+ Read pointer has to be at the start since we are the only
+ reader.
We must keep the LOCK_log to read the 4 first bytes, as this is a hot
log (same as when we call read_log_event() above: for a hot log we
take the mutex).
- */
- if (check_binlog_magic(cur_log,&errmsg))
+ */
+ if (check_binlog_magic(cur_log,&errmsg))
{
if (!hot_log) pthread_mutex_unlock(log_lock);
- goto err;
+ goto err;
}
if (!hot_log) pthread_mutex_unlock(log_lock);
- continue;
+ continue;
}
if (!hot_log) pthread_mutex_unlock(log_lock);
/*
- if we get here, the log was not hot, so we will have to open it
- ourselves. We are sure that the log is still not hot now (a log can get
- from hot to cold, but not from cold to hot). No need for LOCK_log.
+ if we get here, the log was not hot, so we will have to open it
+ ourselves. We are sure that the log is still not hot now (a log can get
+ from hot to cold, but not from cold to hot). No need for LOCK_log.
*/
#ifdef EXTRA_DEBUG
if (global_system_variables.log_warnings)
- sql_print_information("next log '%s' is not active",
+ sql_print_information("next log '%s' is not active",
rli->linfo.log_file_name);
-#endif
+#endif
// open_binlog() will check the magic header
if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name,
- &errmsg)) <0)
- goto err;
+ &errmsg)) <0)
+ goto err;
}
else
{
/*
- Read failed with a non-EOF error.
- TODO: come up with something better to handle this error
+ Read failed with a non-EOF error.
+ TODO: come up with something better to handle this error
*/
if (hot_log)
- pthread_mutex_unlock(log_lock);
+ pthread_mutex_unlock(log_lock);
sql_print_error("Slave SQL thread: I/O error reading \
event(errno: %d cur_log->error: %d)",
- my_errno,cur_log->error);
+ my_errno,cur_log->error);
// set read position to the beginning of the event
my_b_seek(cur_log,rli->event_relay_log_pos);
/* otherwise, we have had a partial read */
errmsg = "Aborting slave SQL thread because of partial event read";
- break; // To end of function
+ break; // To end of function
}
}
if (!errmsg && global_system_variables.log_warnings)
{
- sql_print_information("Error reading relay log event: %s",
+ sql_print_information("Error reading relay log event: %s",
"slave SQL thread was killed");
DBUG_RETURN(0);
}
@@ -4939,7 +4939,7 @@ err:
/*
Rotate a relay log (this is used only by FLUSH LOGS; the automatic rotation
because of size is simpler because when we do it we already have all relevant
- locks; here we don't, so this function is mainly taking locks).
+ locks; here we don't, so this function is mainly taking locks).
Returns nothing as we cannot catch any error (MYSQL_BIN_LOG::new_file()
is void).
*/
@@ -4952,7 +4952,7 @@ void rotate_relay_log(MASTER_INFO* mi)
/* We don't lock rli->run_lock. This would lead to deadlocks. */
pthread_mutex_lock(&mi->run_lock);
- /*
+ /*
We need to test inited because otherwise, new_file() will attempt to lock
LOCK_log, which may not be inited (if we're not a slave).
*/
@@ -5001,7 +5001,7 @@ struct st_reload_entry
Sorted array of table names, please keep it sorted since we are
using bsearch() on it below.
*/
-static st_reload_entry s_mysql_tables[] =
+static st_reload_entry s_mysql_tables[] =
{
{ "columns_priv", st_relay_log_info::RELOAD_GRANT_F },
{ "db", st_relay_log_info::RELOAD_ACCESS_F },
@@ -5024,7 +5024,7 @@ static int reload_entry_compare(const void *lhs, const void *rhs)
}
void st_relay_log_info::touching_table(char const* db, char const* table,
- ulong table_id)
+ ulong table_id)
{
DBUG_ENTER("st_relay_log_info::touching_table");
@@ -5051,7 +5051,7 @@ void st_relay_log_info::touching_table(char const* db, char const* table,
DBUG_VOID_RETURN;
}
-void st_relay_log_info::transaction_end(THD* thd)
+void st_relay_log_info::transaction_end(THD* thd)
{
DBUG_ENTER("st_relay_log_info::transaction_end");
@@ -5106,4 +5106,3 @@ template class I_List_iterator<i_string_pair>;
#endif
#endif /* HAVE_REPLICATION */
-
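/*
  Illustrative sketch (not MySQL source): the retry-loop shape used by
  connect_to_master() in the hunks above -- attempt to (re)connect, log each
  distinct error only once, sleep between attempts, and give up after a
  bounded number of retries (a limit of 0 meaning "retry forever").  The names
  try_connect, max_retries and retry_sleep_s are invented for the example.
*/
#include <chrono>
#include <cstdio>
#include <thread>

static int try_connect(unsigned attempt)     /* stand-in for mysql_real_connect() */
{
  return attempt < 3 ? 2003 : 0;             /* fail twice, then succeed */
}

static bool connect_with_retries(unsigned max_retries, unsigned retry_sleep_s)
{
  int last_errno= 0;
  for (unsigned attempt= 1; ; attempt++)
  {
    int err= try_connect(attempt);
    if (err == 0)
      return true;                           /* connected */
    if (err != last_errno)                   /* don't repeat last error */
    {
      std::fprintf(stderr, "connect error %d, retry in %us (attempt %u)\n",
                   err, retry_sleep_s, attempt);
      last_errno= err;
    }
    if (max_retries && attempt >= max_retries)
      return false;                          /* retries exhausted */
    std::this_thread::sleep_for(std::chrono::seconds(retry_sleep_s));
  }
}

int main()
{
  return connect_with_retries(10, 1) ? 0 : 1;
}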
diff --git a/sql/sp.cc b/sql/sp.cc
index 93e21170156..e794a461402 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -406,7 +406,8 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
{
LEX *old_lex= thd->lex, newlex;
String defstr;
- char olddb[128];
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
ulong old_sql_mode= thd->variables.sql_mode;
ha_rows old_select_limit= thd->variables.select_limit;
@@ -450,9 +451,7 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
goto end;
}
- dbchanged= FALSE;
- if ((ret= sp_use_new_db(thd, name->m_db.str, olddb, sizeof(olddb),
- 1, &dbchanged)))
+ if ((ret= sp_use_new_db(thd, name->m_db, &old_db, 1, &dbchanged)))
goto end;
lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length());
@@ -462,14 +461,14 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
{
sp_head *sp= newlex.sphead;
- if (dbchanged && (ret= mysql_change_db(thd, olddb, 1)))
+ if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1)))
goto end;
delete sp;
ret= SP_PARSE_ERROR;
}
else
{
- if (dbchanged && (ret= mysql_change_db(thd, olddb, 1)))
+ if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1)))
goto end;
*sphp= newlex.sphead;
(*sphp)->set_definer(&definer_user_name, &definer_host_name);
@@ -507,15 +506,14 @@ db_create_routine(THD *thd, int type, sp_head *sp)
int ret;
TABLE *table;
char definer[USER_HOST_BUFF_SIZE];
- char olddb[128];
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
DBUG_ENTER("db_create_routine");
DBUG_PRINT("enter", ("type: %d name: %.*s",type,sp->m_name.length,
sp->m_name.str));
- dbchanged= FALSE;
- if ((ret= sp_use_new_db(thd, sp->m_db.str, olddb, sizeof(olddb),
- 0, &dbchanged)))
+ if ((ret= sp_use_new_db(thd, sp->m_db, &old_db, 0, &dbchanged)))
{
ret= SP_NO_DB_ERROR;
goto done;
@@ -642,7 +640,7 @@ db_create_routine(THD *thd, int type, sp_head *sp)
done:
close_thread_tables(thd);
if (dbchanged)
- (void)mysql_change_db(thd, olddb, 1);
+ (void) mysql_change_db(thd, old_db.str, 1);
DBUG_RETURN(ret);
}
@@ -1539,7 +1537,6 @@ static void sp_update_stmt_used_routines(THD *thd, LEX *lex, SQL_LIST *src,
first_no_prelock - If true, don't add tables or cache routines used by
the body of the first routine (i.e. *start)
will be executed in non-prelocked mode.
- tabs_changed - Set to TRUE some tables were added, FALSE otherwise
NOTE
If some function is missing this won't be reported here.
Instead this fact will be discovered during query execution.
@@ -1552,10 +1549,9 @@ static void sp_update_stmt_used_routines(THD *thd, LEX *lex, SQL_LIST *src,
static int
sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
Sroutine_hash_entry *start,
- bool first_no_prelock, bool *tabs_changed)
+ bool first_no_prelock)
{
int ret= 0;
- bool tabschnd= 0; /* Set if tables changed */
bool first= TRUE;
DBUG_ENTER("sp_cache_routines_and_add_tables_aux");
@@ -1628,15 +1624,13 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
{
sp_update_stmt_used_routines(thd, lex, &sp->m_sroutines,
rt->belong_to_view);
- tabschnd|=
- sp->add_used_tables_to_table_list(thd, &lex->query_tables_last,
- rt->belong_to_view);
+ (void)sp->add_used_tables_to_table_list(thd, &lex->query_tables_last,
+ rt->belong_to_view);
}
+ sp->propagate_attributes(lex);
}
first= FALSE;
}
- if (tabs_changed) /* it can be NULL */
- *tabs_changed= tabschnd;
DBUG_RETURN(ret);
}
@@ -1652,20 +1646,18 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
lex - LEX representing statement
first_no_prelock - If true, don't add tables or cache routines used by
the body of the first routine (i.e. *start)
- tabs_changed - Set to TRUE some tables were added, FALSE otherwise
-
+
RETURN VALUE
0 - success
non-0 - failure
*/
int
-sp_cache_routines_and_add_tables(THD *thd, LEX *lex, bool first_no_prelock,
- bool *tabs_changed)
+sp_cache_routines_and_add_tables(THD *thd, LEX *lex, bool first_no_prelock)
{
return sp_cache_routines_and_add_tables_aux(thd, lex,
(Sroutine_hash_entry *)lex->sroutines_list.first,
- first_no_prelock, tabs_changed);
+ first_no_prelock);
}
@@ -1692,9 +1684,8 @@ sp_cache_routines_and_add_tables_for_view(THD *thd, LEX *lex, TABLE_LIST *view)
(Sroutine_hash_entry **)lex->sroutines_list.next;
sp_update_stmt_used_routines(thd, lex, &view->view->sroutines_list,
view->top_table());
- return sp_cache_routines_and_add_tables_aux(thd, lex,
- *last_cached_routine_ptr, FALSE,
- NULL);
+ return sp_cache_routines_and_add_tables_aux(thd, lex,
+ *last_cached_routine_ptr, FALSE);
}
@@ -1729,20 +1720,21 @@ sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
{
for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
{
- if (triggers->bodies[i][j])
+ sp_head *trigger_body= triggers->bodies[i][j];
+ if (trigger_body)
{
- (void)triggers->bodies[i][j]->
- add_used_tables_to_table_list(thd, &lex->query_tables_last,
- table->belong_to_view);
+ (void)trigger_body->
+ add_used_tables_to_table_list(thd, &lex->query_tables_last,
+ table->belong_to_view);
sp_update_stmt_used_routines(thd, lex,
- &triggers->bodies[i][j]->m_sroutines,
+ &trigger_body->m_sroutines,
table->belong_to_view);
+ trigger_body->propagate_attributes(lex);
}
}
}
ret= sp_cache_routines_and_add_tables_aux(thd, lex,
- *last_cached_routine_ptr,
- FALSE, NULL);
+ *last_cached_routine_ptr, FALSE);
}
return ret;
}
@@ -1815,49 +1807,76 @@ create_string(THD *thd, String *buf,
}
-//
-// Utilities...
-//
+
+/*
+ Change the current database if needed.
+
+ SYNOPSIS
+ sp_use_new_db()
+ thd thread handle
+
+ new_db new database name (a string and its length)
+
+ old_db [IN] str points to a buffer where to store the old
+ database, length contains the size of the buffer
+ [OUT] if old db was not NULL, its name is copied
+ to the buffer pointed at by str and length is updated
+ accordingly. Otherwise str[0] is set to '\0' and length
+ is set to 0. The out parameter should be used only if
+ the database name has been changed (see dbchangedp).
+
+ dbchangedp [OUT] is set to TRUE if the current database is changed,
+ FALSE otherwise. A database is not changed if the old
+ name is the same as the new one, both names are empty,
+ or an error has occurred.
+
+ RETURN VALUE
+ 0 success
+ 1 access denied or out of memory (the error message is
+ set in THD)
+*/
int
-sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddblen,
+sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db,
bool no_access_check, bool *dbchangedp)
{
- bool changeit;
+ int ret;
+ static char empty_c_string[1]= {0}; /* used for not defined db */
DBUG_ENTER("sp_use_new_db");
- DBUG_PRINT("enter", ("newdb: %s", newdb));
+ DBUG_PRINT("enter", ("newdb: %s", new_db.str));
- if (! newdb)
- newdb= (char *)"";
- if (thd->db && thd->db[0])
+ /*
+ Set new_db to an empty string if it's NULL, because mysql_change_db
+ requires a non-NULL argument.
+ new_db.str can be NULL only if we're restoring the old database after
+ execution of a stored procedure and there were no current database
+ selected. The stored procedure itself must always have its database
+ initialized.
+ */
+ if (new_db.str == NULL)
+ new_db.str= empty_c_string;
+
+ if (thd->db)
{
- if (my_strcasecmp(system_charset_info, thd->db, newdb) == 0)
- changeit= 0;
- else
- {
- changeit= 1;
- strnmov(olddb, thd->db, olddblen);
- }
+ old_db->length= (strmake(old_db->str, thd->db, old_db->length) -
+ old_db->str);
}
else
- { // thd->db empty
- if (newdb[0])
- changeit= 1;
- else
- changeit= 0;
- olddb[0] = '\0';
+ {
+ old_db->str[0]= '\0';
+ old_db->length= 0;
}
- if (!changeit)
+
+ /* Don't change the database if the new name is the same as the old one. */
+ if (my_strcasecmp(system_charset_info, old_db->str, new_db.str) == 0)
{
*dbchangedp= FALSE;
DBUG_RETURN(0);
}
- else
- {
- int ret= mysql_change_db(thd, newdb, no_access_check);
- if (! ret)
- *dbchangedp= TRUE;
- DBUG_RETURN(ret);
- }
+ ret= mysql_change_db(thd, new_db.str, no_access_check);
+
+ *dbchangedp= ret == 0;
+ DBUG_RETURN(ret);
}
+
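/*
  Illustrative sketch (not MySQL source): the calling convention introduced
  for sp_use_new_db() above -- the caller passes a (buffer, capacity) pair in
  which the current database name is saved, plus a flag telling whether the
  database was actually switched.  The Str type, current_db and change_db()
  are simplified stand-ins for LEX_STRING, thd->db and mysql_change_db();
  strcasecmp is the POSIX routine, not the server's charset-aware compare.
*/
#include <cassert>
#include <cstdio>
#include <cstring>
#include <strings.h>

struct Str { char *str; size_t length; };

static char current_db[64]= "test";

static int change_db(const char *name)          /* stand-in for mysql_change_db() */
{
  std::snprintf(current_db, sizeof(current_db), "%s", name);
  return 0;
}

static int use_new_db(Str new_db, Str *old_db, bool *changed)
{
  /* Save the current database into the caller's buffer. */
  std::snprintf(old_db->str, old_db->length, "%s", current_db);
  old_db->length= std::strlen(old_db->str);

  /* Nothing to do if the new name equals the old one (case-insensitively). */
  if (strcasecmp(old_db->str, new_db.str) == 0)
  {
    *changed= false;
    return 0;
  }
  int ret= change_db(new_db.str);
  *changed= (ret == 0);
  return ret;
}

int main()
{
  char buf[64];
  Str old_db= { buf, sizeof(buf) };
  Str new_db= { (char*) "mysql", 5 };
  bool changed;
  use_new_db(new_db, &old_db, &changed);
  assert(changed && std::strcmp(old_db.str, "test") == 0);
  if (changed)
    change_db(old_db.str);                       /* restore, as the callers above do */
  return 0;
}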
diff --git a/sql/sp.h b/sql/sp.h
index 2587a9b115a..80430791a5a 100644
--- a/sql/sp.h
+++ b/sql/sp.h
@@ -88,8 +88,7 @@ void sp_add_used_routine(LEX *lex, Query_arena *arena,
void sp_remove_not_own_routines(LEX *lex);
void sp_update_sp_used_routines(HASH *dst, HASH *src);
int sp_cache_routines_and_add_tables(THD *thd, LEX *lex,
- bool first_no_prelock,
- bool *tabs_changed);
+ bool first_no_prelock);
int sp_cache_routines_and_add_tables_for_view(THD *thd, LEX *lex,
TABLE_LIST *view);
int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
@@ -104,15 +103,15 @@ extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first);
TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup);
void close_proc_table(THD *thd, Open_tables_state *backup);
-//
-// Utilities...
-//
-// Do a "use newdb". The current db is stored at olddb.
-// If newdb is the same as the current one, nothing is changed.
-// dbchangedp is set to true if the db was actually changed.
+/*
+ Do a "use new_db". The current db is stored at old_db. If new_db is the
+ same as the current one, nothing is changed. dbchangedp is set to true if
+ the db was actually changed.
+*/
+
int
-sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddbmax,
+sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db,
bool no_access_check, bool *dbchangedp);
#endif /* _SP_H_ */
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 88460337526..8368e828fdc 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -385,24 +385,6 @@ sp_name::init_qname(THD *thd)
m_name.length, m_name.str);
}
-sp_name *
-sp_name_current_db_new(THD *thd, LEX_STRING name)
-{
- sp_name *qname;
-
- if (! thd->db)
- qname= new sp_name(name);
- else
- {
- LEX_STRING db;
-
- db.length= strlen(thd->db);
- db.str= thd->strmake(thd->db, db.length);
- qname= new sp_name(db, name);
- }
- qname->init_qname(thd);
- return qname;
-}
/*
Check that the name 'ident' is ok. It's assumed to be an 'ident'
@@ -513,14 +495,14 @@ sp_head::init_strings(THD *thd, LEX *lex, sp_name *name)
/* During parsing, we must use thd->mem_root */
MEM_ROOT *root= thd->mem_root;
- /* We have to copy strings to get them into the right memroot */
if (name)
{
+ /* Must be initialized in the parser */
+ DBUG_ASSERT(name->m_db.str && name->m_db.length);
+
+ /* We have to copy strings to get them into the right memroot */
m_db.length= name->m_db.length;
- if (name->m_db.length == 0)
- m_db.str= NULL;
- else
- m_db.str= strmake_root(root, name->m_db.str, name->m_db.length);
+ m_db.str= strmake_root(root, name->m_db.str, name->m_db.length);
m_name.length= name->m_name.length;
m_name.str= strmake_root(root, name->m_name.str, name->m_name.length);
@@ -529,10 +511,15 @@ sp_head::init_strings(THD *thd, LEX *lex, sp_name *name)
m_qname.length= name->m_qname.length;
m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length);
}
- else if (thd->db)
+ else
{
- m_db.length= thd->db_length;
- m_db.str= strmake_root(root, thd->db, m_db.length);
+ /*
+ FIXME: the only use case when name is NULL is events, and it should
+ be rewritten soon. Remove the else part and replace 'if' with
+ an assert when this is done.
+ */
+ LEX_STRING str_reset= { NULL, 0 };
+ m_db= m_name= m_qname= str_reset;
}
if (m_param_begin && m_param_end)
@@ -949,7 +936,8 @@ bool
sp_head::execute(THD *thd)
{
DBUG_ENTER("sp_head::execute");
- char olddb[128];
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
sp_rcontext *ctx;
bool err_status= FALSE;
@@ -996,10 +984,8 @@ sp_head::execute(THD *thd)
m_first_instance->m_last_cached_sp == this) ||
(m_recursion_level + 1 == m_next_cached_sp->m_recursion_level));
- dbchanged= FALSE;
if (m_db.length &&
- (err_status= sp_use_new_db(thd, m_db.str, olddb, sizeof(olddb), 0,
- &dbchanged)))
+ (err_status= sp_use_new_db(thd, m_db, &old_db, 0, &dbchanged)))
goto done;
if ((ctx= thd->spcont))
@@ -1170,10 +1156,10 @@ sp_head::execute(THD *thd)
{
/*
No access check when changing back to where we came from.
- (It would generate an error from mysql_change_db() when olddb=="")
+ (It would generate an error from mysql_change_db() when old_db=="")
*/
if (! thd->killed)
- err_status|= mysql_change_db(thd, olddb, 1);
+ err_status|= mysql_change_db(thd, old_db.str, 1);
}
m_flags&= ~IS_INVOKED;
DBUG_PRINT("info",
@@ -1675,6 +1661,16 @@ sp_head::restore_lex(THD *thd)
oldlex->next_state= sublex->next_state;
oldlex->trg_table_fields.push_back(&sublex->trg_table_fields);
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If this substatement needs row-based, the entire routine does too (we
+ cannot switch from statement-based to row-based only for this
+ substatement).
+ */
+ if (sublex->binlog_row_based_if_mixed)
+ m_flags|= BINLOG_ROW_BASED_IF_MIXED;
+#endif
+
/*
Add routines which are used by statement to respective set for
this routine.
@@ -1857,9 +1853,6 @@ sp_head::reset_thd_mem_root(THD *thd)
(ulong) &mem_root, (ulong) &thd->mem_root));
free_list= thd->free_list; // Keep the old list
thd->free_list= NULL; // Start a new one
- /* Copy the db, since substatements will point to it */
- m_thd_db= thd->db;
- thd->db= thd->strmake(thd->db, thd->db_length);
m_thd= thd;
DBUG_VOID_RETURN;
}
@@ -1875,7 +1868,6 @@ sp_head::restore_thd_mem_root(THD *thd)
DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx",
(ulong) &mem_root, (ulong) &thd->mem_root));
thd->free_list= flist; // Restore the old one
- thd->db= m_thd_db; // Restore the original db pointer
thd->mem_root= m_thd_root;
m_thd= NULL;
DBUG_VOID_RETURN;
@@ -1941,8 +1933,11 @@ sp_head::show_create_procedure(THD *thd)
field_list.push_back(new Item_empty_string("Procedure", NAME_LEN));
field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len));
// 1024 is for not to confuse old clients
- field_list.push_back(new Item_empty_string("Create Procedure",
- max(buffer.length(), 1024)));
+ Item_empty_string *definition=
+ new Item_empty_string("Create Procedure", max(buffer.length(),1024));
+ definition->maybe_null= TRUE;
+ field_list.push_back(definition);
+
if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS |
Protocol::SEND_EOF))
DBUG_RETURN(1);
@@ -1951,6 +1946,8 @@ sp_head::show_create_procedure(THD *thd)
protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info);
if (full_access)
protocol->store(m_defstr.str, m_defstr.length, system_charset_info);
+ else
+ protocol->store_null();
res= protocol->write();
send_eof(thd);
@@ -2006,8 +2003,11 @@ sp_head::show_create_function(THD *thd)
&sql_mode_len);
field_list.push_back(new Item_empty_string("Function",NAME_LEN));
field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len));
- field_list.push_back(new Item_empty_string("Create Function",
- max(buffer.length(),1024)));
+ Item_empty_string *definition=
+ new Item_empty_string("Create Function", max(buffer.length(),1024));
+ definition->maybe_null= TRUE;
+ field_list.push_back(definition);
+
if (protocol->send_fields(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(1);
@@ -2016,6 +2016,8 @@ sp_head::show_create_function(THD *thd)
protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info);
if (full_access)
protocol->store(m_defstr.str, m_defstr.length, system_charset_info);
+ else
+ protocol->store_null();
res= protocol->write();
send_eof(thd);
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 791343f0061..4712647b6f4 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -61,13 +61,6 @@ public:
*/
LEX_STRING m_sroutines_key;
- sp_name(LEX_STRING name)
- : m_name(name)
- {
- m_db.str= m_qname.str= m_sroutines_key.str= 0;
- m_db.length= m_qname.length= m_sroutines_key.length= 0;
- }
-
sp_name(LEX_STRING db, LEX_STRING name)
: m_db(db), m_name(name)
{
@@ -101,8 +94,6 @@ public:
{}
};
-sp_name *
-sp_name_current_db_new(THD *thd, LEX_STRING name);
bool
check_routine_name(LEX_STRING name);
@@ -126,7 +117,8 @@ public:
/* Is set if a procedure with COMMIT (implicit or explicit) | ROLLBACK */
HAS_COMMIT_OR_ROLLBACK= 128,
LOG_SLOW_STATEMENTS= 256, // Used by events
- LOG_GENERAL_LOG= 512 // Used by events
+ LOG_GENERAL_LOG= 512, // Used by events
+ BINLOG_ROW_BASED_IF_MIXED= 1024
};
/* TYPE_ENUM_FUNCTION, TYPE_ENUM_PROCEDURE or TYPE_ENUM_TRIGGER */
@@ -351,12 +343,30 @@ public:
int show_routine_code(THD *thd);
#endif
+ /*
+ This method is intended for attributes of a routine which need
+ to propagate upwards to the LEX of the caller (when a property of a
+ sp_head needs to "taint" the caller).
+ */
+ void propagate_attributes(LEX *lex)
+ {
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If this routine needs row-based binary logging, the entire top statement
+ too (we cannot switch from statement-based to row-based only for this
+ routine, as in statement-based the top-statement may be binlogged and
+ the substatements not).
+ */
+ if (m_flags & BINLOG_ROW_BASED_IF_MIXED)
+ lex->binlog_row_based_if_mixed= TRUE;
+#endif
+ }
+
private:
MEM_ROOT *m_thd_root; // Temp. store for thd's mem_root
THD *m_thd; // Set if we have reset mem_root
- char *m_thd_db; // Original thd->db pointer
sp_pcontext *m_pcont; // Parse context
List<LEX> m_lex; // Temp. store for the other lex
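/*
  Illustrative sketch (not MySQL source): how a per-routine flag such as
  BINLOG_ROW_BASED_IF_MIXED can "taint" the caller, which is what
  propagate_attributes() above does.  Routine, Statement and the flag value
  are simplified stand-ins for sp_head, LEX and the real enum member.
*/
#include <cassert>

enum { ROW_BASED_IF_MIXED= 1 << 10 };        /* one bit of the routine's flags */

struct Statement { bool row_based_if_mixed= false; };

struct Routine
{
  unsigned flags= 0;

  void absorb_substatement(const Statement &sub)
  {
    /* If any sub-statement needs row-based logging, the whole routine does. */
    if (sub.row_based_if_mixed)
      flags|= ROW_BASED_IF_MIXED;
  }
  void propagate_attributes(Statement *caller) const
  {
    /* ... and the routine in turn taints the top-level statement. */
    if (flags & ROW_BASED_IF_MIXED)
      caller->row_based_if_mixed= true;
  }
};

int main()
{
  Routine func;
  Statement body_stmt;                       /* e.g. an INSERT using UUID() */
  body_stmt.row_based_if_mixed= true;
  func.absorb_substatement(body_stmt);

  Statement top;                             /* the statement calling the routine */
  func.propagate_attributes(&top);
  assert(top.row_based_if_mixed);
  return 0;
}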
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 8066c41fd10..966d0f88ca3 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -264,8 +264,8 @@ my_bool acl_init(bool dont_read_acl_tables)
acl_cache= new hash_filo(ACL_CACHE_SIZE, 0, 0,
(hash_get_key) acl_entry_get_key,
(hash_free_key) free,
- /* Use the case sensitive "binary" charset */
- &my_charset_bin);
+ lower_case_file_system ?
+ system_charset_info : &my_charset_bin);
if (dont_read_acl_tables)
{
DBUG_RETURN(0); /* purecov: tested */
@@ -2049,8 +2049,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
}
else if ((error=table->file->ha_write_row(table->record[0]))) // insert
{ // This should never happen
- if (error && error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE) /* purecov: inspected */
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP))
{
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
error= -1; /* purecov: deadcode */
@@ -2172,7 +2171,7 @@ static int replace_db_table(TABLE *table, const char *db,
}
else if (rights && (error= table->file->ha_write_row(table->record[0])))
{
- if (error && error != HA_ERR_FOUND_DUPP_KEY) /* purecov: inspected */
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
goto table_error; /* purecov: deadcode */
}
@@ -2744,7 +2743,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
else
{
error=table->file->ha_write_row(table->record[0]);
- if (error && error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
goto table_error; /* purecov: deadcode */
}
@@ -2862,7 +2861,7 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
else
{
error=table->file->ha_write_row(table->record[0]);
- if (error && error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
goto table_error;
}
@@ -2907,7 +2906,7 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list,
{
ulong column_priv= 0;
List_iterator <LEX_USER> str_list (user_list);
- LEX_USER *Str;
+ LEX_USER *Str, *tmp_Str;
TABLE_LIST tables[3];
bool create_new_users=0;
char *db_name, *table_name;
@@ -3033,10 +3032,15 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list,
thd->mem_root= &memex;
grant_version++;
- while ((Str = str_list++))
+ while ((tmp_Str = str_list++))
{
int error;
GRANT_TABLE *grant_table;
+ if (!(Str= get_current_user(thd, tmp_Str)))
+ {
+ result= TRUE;
+ continue;
+ }
if (Str->host.length > HOSTNAME_LENGTH ||
Str->user.length > USERNAME_LENGTH)
{
@@ -3172,7 +3176,7 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
bool revoke_grant, bool no_error)
{
List_iterator <LEX_USER> str_list (user_list);
- LEX_USER *Str;
+ LEX_USER *Str, *tmp_Str;
TABLE_LIST tables[2];
bool create_new_users=0, result=0;
char *db_name, *table_name;
@@ -3240,10 +3244,15 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
DBUG_PRINT("info",("now time to iterate and add users"));
- while ((Str= str_list++))
+ while ((tmp_Str= str_list++))
{
int error;
GRANT_NAME *grant_name;
+ if (!(Str= get_current_user(thd, tmp_Str)))
+ {
+ result= TRUE;
+ continue;
+ }
if (Str->host.length > HOSTNAME_LENGTH ||
Str->user.length > USERNAME_LENGTH)
{
@@ -3312,7 +3321,7 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
ulong rights, bool revoke_grant)
{
List_iterator <LEX_USER> str_list (list);
- LEX_USER *Str;
+ LEX_USER *Str, *tmp_Str;
char tmp_db[NAME_LEN+1];
bool create_new_users=0;
TABLE_LIST tables[2];
@@ -3371,8 +3380,13 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
grant_version++;
int result=0;
- while ((Str = str_list++))
+ while ((tmp_Str = str_list++))
{
+ if (!(Str= get_current_user(thd, tmp_Str)))
+ {
+ result= TRUE;
+ continue;
+ }
if (Str->host.length > HOSTNAME_LENGTH ||
Str->user.length > USERNAME_LENGTH)
{
@@ -5335,7 +5349,7 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list)
int result;
String wrong_users;
ulong sql_mode;
- LEX_USER *user_name;
+ LEX_USER *user_name, *tmp_user_name;
List_iterator <LEX_USER> user_list(list);
TABLE_LIST tables[GRANT_TABLES];
DBUG_ENTER("mysql_create_user");
@@ -5347,8 +5361,13 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list)
rw_wrlock(&LOCK_grant);
VOID(pthread_mutex_lock(&acl_cache->lock));
- while ((user_name= user_list++))
+ while ((tmp_user_name= user_list++))
{
+ if (!(user_name= get_current_user(thd, tmp_user_name)))
+ {
+ result= TRUE;
+ continue;
+ }
/*
Search all in-memory structures and grant tables
for a mention of the new user name.
@@ -5394,7 +5413,7 @@ bool mysql_drop_user(THD *thd, List <LEX_USER> &list)
{
int result;
String wrong_users;
- LEX_USER *user_name;
+ LEX_USER *user_name, *tmp_user_name;
List_iterator <LEX_USER> user_list(list);
TABLE_LIST tables[GRANT_TABLES];
DBUG_ENTER("mysql_drop_user");
@@ -5406,8 +5425,13 @@ bool mysql_drop_user(THD *thd, List <LEX_USER> &list)
rw_wrlock(&LOCK_grant);
VOID(pthread_mutex_lock(&acl_cache->lock));
- while ((user_name= user_list++))
+ while ((tmp_user_name= user_list++))
{
+ if (!(user_name= get_current_user(thd, tmp_user_name)))
+ {
+ result= TRUE;
+ continue;
+ }
if (handle_grant_data(tables, 1, user_name, NULL) <= 0)
{
append_user(&wrong_users, user_name);
@@ -5444,8 +5469,8 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list)
{
int result;
String wrong_users;
- LEX_USER *user_from;
- LEX_USER *user_to;
+ LEX_USER *user_from, *tmp_user_from;
+ LEX_USER *user_to, *tmp_user_to;
List_iterator <LEX_USER> user_list(list);
TABLE_LIST tables[GRANT_TABLES];
DBUG_ENTER("mysql_rename_user");
@@ -5457,9 +5482,19 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list)
rw_wrlock(&LOCK_grant);
VOID(pthread_mutex_lock(&acl_cache->lock));
- while ((user_from= user_list++))
+ while ((tmp_user_from= user_list++))
{
- user_to= user_list++;
+ if (!(user_from= get_current_user(thd, tmp_user_from)))
+ {
+ result= TRUE;
+ continue;
+ }
+ tmp_user_to= user_list++;
+ if (!(user_to= get_current_user(thd, tmp_user_to)))
+ {
+ result= TRUE;
+ continue;
+ }
DBUG_ASSERT(user_to != 0); /* Syntax enforces pairs of users. */
/*
@@ -5514,10 +5549,15 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
rw_wrlock(&LOCK_grant);
VOID(pthread_mutex_lock(&acl_cache->lock));
- LEX_USER *lex_user;
+ LEX_USER *lex_user, *tmp_lex_user;
List_iterator <LEX_USER> user_list(list);
- while ((lex_user=user_list++))
+ while ((tmp_lex_user= user_list++))
{
+ if (!(lex_user= get_current_user(thd, tmp_lex_user)))
+ {
+ result= -1;
+ continue;
+ }
if (!find_acl_user(lex_user->host.str, lex_user->user.str, TRUE))
{
sql_print_error("REVOKE ALL PRIVILEGES, GRANT: User '%s'@'%s' does not "
@@ -5750,25 +5790,30 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
TABLE_LIST tables[1];
List<LEX_USER> user_list;
bool result;
+ ACL_USER *au;
+ char passwd_buff[SCRAMBLED_PASSWORD_CHAR_LENGTH+1];
DBUG_ENTER("sp_grant_privileges");
if (!(combo=(LEX_USER*) thd->alloc(sizeof(st_lex_user))))
DBUG_RETURN(TRUE);
combo->user.str= sctx->user;
-
+
VOID(pthread_mutex_lock(&acl_cache->lock));
- if (!find_acl_user(combo->host.str=(char*)sctx->host_or_ip, combo->user.str,
- FALSE) &&
- !find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str,
- FALSE) &&
- !find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str,
- FALSE) &&
- !find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE))
- {
- VOID(pthread_mutex_unlock(&acl_cache->lock));
- DBUG_RETURN(TRUE);
- }
+
+ if ((au= find_acl_user(combo->host.str=(char*)sctx->host_or_ip,combo->user.str,FALSE)))
+ goto found_acl;
+ if ((au= find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str,FALSE)))
+ goto found_acl;
+ if ((au= find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str,FALSE)))
+ goto found_acl;
+ if((au= find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE)))
+ goto found_acl;
+
+ VOID(pthread_mutex_unlock(&acl_cache->lock));
+ DBUG_RETURN(TRUE);
+
+ found_acl:
VOID(pthread_mutex_unlock(&acl_cache->lock));
bzero((char*)tables, sizeof(TABLE_LIST));
@@ -5776,13 +5821,37 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
tables->db= (char*)sp_db;
tables->table_name= tables->alias= (char*)sp_name;
-
+
combo->host.length= strlen(combo->host.str);
combo->user.length= strlen(combo->user.str);
combo->host.str= thd->strmake(combo->host.str,combo->host.length);
combo->user.str= thd->strmake(combo->user.str,combo->user.length);
- combo->password.str= (char*)"";
- combo->password.length= 0;
+
+
+ if(au && au->salt_len)
+ {
+ if (au->salt_len == SCRAMBLE_LENGTH)
+ {
+ make_password_from_salt(passwd_buff, au->salt);
+ combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
+ }
+ else if (au->salt_len == SCRAMBLE_LENGTH_323)
+ {
+ make_password_from_salt_323(passwd_buff, (ulong *) au->salt);
+ combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
+ }
+ else
+ {
+ my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH);
+ return -1;
+ }
+ combo->password.str= passwd_buff;
+ }
+ else
+ {
+ combo->password.str= (char*)"";
+ combo->password.length= 0;
+ }
if (user_list.push_back(combo))
DBUG_RETURN(TRUE);
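/*
  Illustrative sketch (not MySQL source): the loop shape adopted above in the
  GRANT / CREATE USER / DROP USER code -- each list element may be a
  CURRENT_USER placeholder that has to be resolved first; on resolution
  failure the command remembers the error but keeps scanning the rest of the
  list.  User, resolve_user() and the vector are simplified stand-ins for
  LEX_USER, get_current_user() and List_iterator.
*/
#include <cstdio>
#include <string>
#include <vector>

struct User { std::string name; bool is_current_user_placeholder; };

static const User *resolve_user(const User &u, const User &session_user)
{
  if (!u.is_current_user_placeholder)
    return &u;
  /* Pretend resolution can fail, e.g. when no user is authenticated. */
  return session_user.name.empty() ? nullptr : &session_user;
}

int main()
{
  User session_user{ "root@localhost", false };
  std::vector<User> list= { { "", true }, { "joe@%", false } };

  bool result= false;                        /* "some element failed" */
  for (const User &u : list)
  {
    const User *resolved= resolve_user(u, session_user);
    if (!resolved)
    {
      result= true;                          /* remember the failure ...      */
      continue;                              /* ... but keep processing the list */
    }
    std::printf("granting to %s\n", resolved->name.c_str());
  }
  return result ? 1 : 0;
}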
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 5b039f6bcc0..e40b9721911 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -49,6 +49,8 @@ static bool open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias,
static void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
bool send_refresh);
static bool reopen_table(TABLE *table);
+static bool
+has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables);
extern "C" byte *table_cache_key(const byte *record,uint *length,
@@ -1104,7 +1106,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
if (found_old_table)
{
/* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
if (!lock_in_use)
VOID(pthread_mutex_unlock(&LOCK_open));
@@ -1179,135 +1181,134 @@ static inline uint tmpkeyval(THD *thd, TABLE *table)
void close_temporary_tables(THD *thd)
{
- TABLE *next, *prev_table, *table;
- char *query= 0, *end;
- uint query_buf_size, max_names_len;
- bool found_user_tables;
-
+ TABLE *table;
if (!thd->temporary_tables)
return;
-
- LINT_INIT(end);
- query_buf_size= 50; // Enough for DROP ... TABLE IF EXISTS
- /*
- insertion sort of temp tables by pseudo_thread_id to build ordered list
+ if (!mysql_bin_log.is_open() || thd->current_stmt_binlog_row_based)
+ {
+ TABLE *next;
+ for (table= thd->temporary_tables; table; table= next)
+ {
+ next=table->next;
+ close_temporary(table, 1, 1);
+ }
+ thd->temporary_tables= 0;
+ return;
+ }
+
+ TABLE *next,
+ *prev_table /* TODO: 5.1 maintains prev link in temporary_tables
+ double-linked list so we could fix it. But it is not necessary
+ at this time when the list is being destroyed */;
+ bool was_quote_show= true; /* assume thd->options has OPTION_QUOTE_SHOW_CREATE */
+ // Better add "if exists", in case a RESET MASTER has been done
+ const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
+ uint stub_len= sizeof(stub) - 1;
+ char buf[256];
+ memcpy(buf, stub, stub_len);
+ String s_query= String(buf, sizeof(buf), system_charset_info);
+ bool found_user_tables= false;
+ LINT_INIT(next);
+
+ /*
+ insertion sort of temp tables by pseudo_thread_id to build ordered list
of sublists of equal pseudo_thread_id
*/
- for (prev_table= thd->temporary_tables,
- table= prev_table->next,
- found_user_tables= (prev_table->s->table_name.str[0] != '#');
+
+ for (prev_table= thd->temporary_tables, table= prev_table->next;
table;
prev_table= table, table= table->next)
{
- TABLE *prev_sorted /* same as for prev_table */,
- *sorted;
- /*
- table not created directly by the user is moved to the tail.
- Fixme/todo: nothing (I checked the manual) prevents user to create temp
- with `#'
- */
- if (table->s->table_name.str[0] == '#')
- continue;
- else
+ TABLE *prev_sorted /* same as for prev_table */, *sorted;
+ if (is_user_table(table))
{
- found_user_tables = 1;
- }
- for (prev_sorted= NULL, sorted= thd->temporary_tables; sorted != table;
- prev_sorted= sorted, sorted= sorted->next)
- {
- if (sorted->s->table_name.str[0] == '#' || tmpkeyval(thd, sorted) > tmpkeyval(thd, table))
+ if (!found_user_tables)
+ found_user_tables= true;
+ for (prev_sorted= NULL, sorted= thd->temporary_tables; sorted != table;
+ prev_sorted= sorted, sorted= sorted->next)
{
- /* move into the sorted part of the list from the unsorted */
- prev_table->next= table->next;
- table->next= sorted;
- if (prev_sorted)
- {
- prev_sorted->next= table;
- }
- else
+ if (!is_user_table(sorted) ||
+ tmpkeyval(thd, sorted) > tmpkeyval(thd, table))
{
- thd->temporary_tables= table;
+ /* move into the sorted part of the list from the unsorted */
+ prev_table->next= table->next;
+ table->next= sorted;
+ if (prev_sorted)
+ {
+ prev_sorted->next= table;
+ }
+ else
+ {
+ thd->temporary_tables= table;
+ }
+ table= prev_table;
+ break;
}
- table= prev_table;
- break;
}
}
- }
- /*
- calc query_buf_size as max per sublists, one sublist per pseudo thread id.
- Also stop at first occurence of `#'-named table that starts
- all implicitly created temp tables
- */
- for (max_names_len= 0, table=thd->temporary_tables;
- table && table->s->table_name.str[0] != '#';
- table=table->next)
- {
- uint tmp_names_len;
- for (tmp_names_len= table->s->table_cache_key.length + 1;
- table->next && table->s->table_name.str[0] != '#' &&
- tmpkeyval(thd, table) == tmpkeyval(thd, table->next);
- table=table->next)
- {
- /*
- We are going to add 4 ` around the db/table names, so 1 might not look
- enough; indeed it is enough, because table->s->table_cache_key.length is
- greater (by 8, because of server_id and thread_id) than db||table.
- */
- tmp_names_len += table->next->s->table_cache_key.length + 1;
- }
- if (tmp_names_len > max_names_len) max_names_len= tmp_names_len;
}
-
- /* allocate */
- if (found_user_tables && mysql_bin_log.is_open() &&
- !thd->current_stmt_binlog_row_based &&
- (query = alloc_root(thd->mem_root, query_buf_size+= max_names_len)))
- // Better add "if exists", in case a RESET MASTER has been done
- end= strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ");
+
+ /* We always quote db,table names though it is slight overkill */
+ if (found_user_tables &&
+ !(was_quote_show= (thd->options & OPTION_QUOTE_SHOW_CREATE)))
+ {
+ thd->options |= OPTION_QUOTE_SHOW_CREATE;
+ }
/* scan sorted tmps to generate sequence of DROP */
- for (table=thd->temporary_tables; table; table= next)
+ for (table= thd->temporary_tables; table; table= next)
{
- if (query // we might be out of memory, but this is not fatal
- && table->s->table_name.str[0] != '#')
+ if (is_user_table(table))
{
- char *end_cur;
/* Set pseudo_thread_id to be that of the processed table */
thd->variables.pseudo_thread_id= tmpkeyval(thd, table);
/* Loop forward through all tables within the sublist of
common pseudo_thread_id to create single DROP query */
- for (end_cur= end;
- table && table->s->table_name.str[0] != '#' &&
+ for (s_query.length(stub_len);
+ table && is_user_table(table) &&
tmpkeyval(thd, table) == thd->variables.pseudo_thread_id;
table= next)
{
- end_cur= strxmov(end_cur, "`", table->s->db.str, "`.`",
- table->s->table_name.str, "`,", NullS);
+ /*
+ We are going to add 4 ` around the db/table names and possibly more
+ due to special characters in the names
+ */
+ append_identifier(thd, &s_query, table->s->db.str, strlen(table->s->db.str));
+ s_query.q_append('.');
+ append_identifier(thd, &s_query, table->s->table_name.str,
+ strlen(table->s->table_name.str));
+ s_query.q_append(',');
next= table->next;
close_temporary(table, 1, 1);
}
thd->clear_error();
- /* The -1 is to remove last ',' */
- Query_log_event qinfo(thd, query, (ulong)(end_cur - query) - 1, 0, FALSE);
+ CHARSET_INFO *cs_save= thd->variables.character_set_client;
+ thd->variables.character_set_client= system_charset_info;
+ Query_log_event qinfo(thd, s_query.ptr(),
+ s_query.length() - 1 /* to remove trailing ',' */,
+ 0, FALSE);
+ thd->variables.character_set_client= cs_save;
/*
- Imagine the thread had created a temp table, then was doing a SELECT,
- and the SELECT was killed. Then it's not clever to mark the statement
- above as "killed", because it's not really a statement updating data,
- and there are 99.99% chances it will succeed on slave. If a real update
- (one updating a persistent table) was killed on the master, then this
- real update will be logged with error_code=killed, rightfully causing
- the slave to stop.
+ Imagine the thread had created a temp table, then was doing a SELECT, and
+ the SELECT was killed. Then it's not clever to mark the statement above as
+ "killed", because it's not really a statement updating data, and there
+ are 99.99% chances it will succeed on slave.
+ If a real update (one updating a persistent table) was killed on the
+ master, then this real update will be logged with error_code=killed,
+ rightfully causing the slave to stop.
*/
qinfo.error_code= 0;
mysql_bin_log.write(&qinfo);
}
- else
+ else
{
next= table->next;
close_temporary(table, 1, 1);
}
}
+ if (!was_quote_show)
+ thd->options &= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */
thd->temporary_tables=0;
}
@@ -1674,7 +1675,7 @@ TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find)
}
*prev=0;
// Notify any 'refresh' threads
- pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
return start;
}
@@ -2224,7 +2225,7 @@ static bool reopen_table(TABLE *table)
if (table->triggers)
table->triggers->set_table(table);
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
error=0;
end:
@@ -2325,7 +2326,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh)
{
my_afree((gptr) tables);
}
- VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh
+ broadcast_refresh();
*prev=0;
DBUG_RETURN(error);
}
@@ -2361,7 +2362,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
}
}
if (found)
- VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh
+ broadcast_refresh();
DBUG_VOID_RETURN;
}
@@ -2514,6 +2515,8 @@ TABLE *drop_locked_tables(THD *thd,const char *db, const char *table_name)
}
}
*prev=0;
+ if (found)
+ broadcast_refresh();
if (thd->locked_tables && thd->locked_tables->table_count == 0)
{
my_free((gptr) thd->locked_tables,MYF(0));
@@ -2852,25 +2855,18 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
statement for which table list for prelocking is already built, let
us cache routines and try to build such table list.
- NOTE: We will mark statement as requiring prelocking only if we will
- have non empty table list. But this does not guarantee that in prelocked
- mode we will have some locked tables, because queries which use only
- derived/information schema tables and views possible. Thus "counter"
- may be still zero for prelocked statement...
*/
if (!thd->prelocked_mode && !thd->lex->requires_prelocking() &&
thd->lex->sroutines_list.elements)
{
- bool first_no_prelocking, need_prelocking, tabs_changed;
+ bool first_no_prelocking, need_prelocking;
TABLE_LIST **save_query_tables_last= thd->lex->query_tables_last;
DBUG_ASSERT(thd->lex->query_tables == *start);
sp_get_prelocking_info(thd, &need_prelocking, &first_no_prelocking);
- if (sp_cache_routines_and_add_tables(thd, thd->lex,
- first_no_prelocking,
- &tabs_changed))
+ if (sp_cache_routines_and_add_tables(thd, thd->lex, first_no_prelocking))
{
/*
Serious error during reading stored routines from mysql.proc table.
@@ -2880,7 +2876,7 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
result= -1;
goto err;
}
- else if ((tabs_changed || *start) && need_prelocking)
+ else if (need_prelocking)
{
query_tables_last_own= save_query_tables_last;
*start= thd->lex->query_tables;
@@ -3307,15 +3303,18 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen)
in prelocked mode.
*/
DBUG_ASSERT(!thd->prelocked_mode || !thd->lex->requires_prelocking());
- /*
- If statement requires prelocking then it has non-empty table list.
- So it is safe to shortcut.
- */
- DBUG_ASSERT(!thd->lex->requires_prelocking() || tables);
*need_reopen= FALSE;
- if (!tables)
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ CREATE ... SELECT UUID() locks no tables, we have to test here.
+ */
+ if (thd->lex->binlog_row_based_if_mixed)
+ thd->set_current_stmt_binlog_row_based_if_mixed();
+#endif /*HAVE_ROW_BASED_REPLICATION*/
+
+ if (!tables && !thd->lex->requires_prelocking())
DBUG_RETURN(0);
/*
@@ -3345,6 +3344,19 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen)
{
thd->in_lock_tables=1;
thd->options|= OPTION_TABLE_LOCK;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If we have >= 2 different tables to update with auto_inc columns,
+ statement-based binlogging won't work. We can solve this problem in
+ mixed mode by switching to row-based binlogging:
+ */
+ if (thd->variables.binlog_format == BINLOG_FORMAT_MIXED &&
+ has_two_write_locked_tables_with_auto_increment(tables))
+ {
+ thd->lex->binlog_row_based_if_mixed= TRUE;
+ thd->set_current_stmt_binlog_row_based_if_mixed();
+ }
+#endif
}
if (! (thd->lock= mysql_lock_tables(thd, start, (uint) (ptr - start),
@@ -4962,36 +4974,48 @@ store_top_level_join_columns(THD *thd, TABLE_LIST *table_ref,
if (table_ref->nested_join)
{
List_iterator_fast<TABLE_LIST> nested_it(table_ref->nested_join->join_list);
- TABLE_LIST *cur_left_neighbor= nested_it++;
- TABLE_LIST *cur_right_neighbor= NULL;
+ TABLE_LIST *same_level_left_neighbor= nested_it++;
+ TABLE_LIST *same_level_right_neighbor= NULL;
+ /* Left/right-most neighbors, possibly at higher levels in the join tree. */
+ TABLE_LIST *real_left_neighbor, *real_right_neighbor;
- while (cur_left_neighbor)
+ while (same_level_left_neighbor)
{
- TABLE_LIST *cur_table_ref= cur_left_neighbor;
- cur_left_neighbor= nested_it++;
+ TABLE_LIST *cur_table_ref= same_level_left_neighbor;
+ same_level_left_neighbor= nested_it++;
/*
The order of RIGHT JOIN operands is reversed in 'join list' to
transform it into a LEFT JOIN. However, in this procedure we need
the join operands in their lexical order, so below we reverse the
- join operands. Notice that this happens only in the first loop, and
- not in the second one, as in the second loop cur_left_neighbor == NULL.
- This is the correct behavior, because the second loop
- sets cur_table_ref reference correctly after the join operands are
+ join operands. Notice that this happens only in the first loop,
+ and not in the second one, as in the second loop
+ same_level_left_neighbor == NULL.
+ This is the correct behavior, because the second loop sets
+ cur_table_ref reference correctly after the join operands are
swapped in the first loop.
*/
- if (cur_left_neighbor &&
+ if (same_level_left_neighbor &&
cur_table_ref->outer_join & JOIN_TYPE_RIGHT)
{
/* This can happen only for JOIN ... ON. */
DBUG_ASSERT(table_ref->nested_join->join_list.elements == 2);
- swap_variables(TABLE_LIST*, cur_left_neighbor, cur_table_ref);
+ swap_variables(TABLE_LIST*, same_level_left_neighbor, cur_table_ref);
}
+ /*
+ Pick the parent's left and right neighbors if there are no immediate
+ neighbors at the same level.
+ */
+ real_left_neighbor= (same_level_left_neighbor) ?
+ same_level_left_neighbor : left_neighbor;
+ real_right_neighbor= (same_level_right_neighbor) ?
+ same_level_right_neighbor : right_neighbor;
+
if (cur_table_ref->nested_join &&
store_top_level_join_columns(thd, cur_table_ref,
- cur_left_neighbor, cur_right_neighbor))
+ real_left_neighbor, real_right_neighbor))
goto err;
- cur_right_neighbor= cur_table_ref;
+ same_level_right_neighbor= cur_table_ref;
}
}
@@ -5864,12 +5888,17 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values,
bool ignore_errors)
{
List_iterator_fast<Item> f(fields),v(values);
- Item *value;
+ Item *value, *fld;
Item_field *field;
DBUG_ENTER("fill_record");
- while ((field=(Item_field*) f++))
+ while ((fld= f++))
{
+ if (!(field= fld->filed_for_view_update()))
+ {
+ my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name);
+ DBUG_RETURN(TRUE);
+ }
value=v++;
Field *rfield= field->field;
TABLE *table= rfield->table;
@@ -6194,7 +6223,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
Signal any thread waiting for tables to be freed to
reopen their tables
*/
- (void) pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
DBUG_PRINT("info", ("Waiting for refresh signal"));
if (!(flags & RTFC_CHECK_KILLED_FLAG) || !thd->killed)
{
@@ -6477,3 +6506,46 @@ void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table
DBUG_VOID_RETURN;
}
+
+/*
+ Tells if two (or more) tables have auto_increment columns and we want to
+ lock those tables with a write lock.
+
+ SYNOPSIS
+ has_two_write_locked_tables_with_auto_increment
+ tables Table list
+
+ NOTES:
+ Call this function only when you have established the list of all tables
+ which you'll want to update (including stored functions, triggers, views
+ inside your statement).
+
+ RETURN
+ 0 No
+ 1 Yes
+*/
+
+static bool
+has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables)
+{
+ char *first_table_name= NULL, *first_db;
+ for (TABLE_LIST *table= tables; table; table= table->next_global)
+ {
+ /* we must do preliminary checks as table->table may be NULL */
+ if (!table->placeholder() && !table->schema_table &&
+ table->table->found_next_number_field &&
+ (table->lock_type >= TL_WRITE_ALLOW_WRITE))
+ {
+ if (first_table_name == NULL)
+ {
+ first_table_name= table->table_name;
+ first_db= table->db;
+ DBUG_ASSERT(first_db);
+ }
+ else if (strcmp(first_db, table->db) ||
+ strcmp(first_table_name, table->table_name))
+ return 1;
+ }
+ }
+ return 0;
+}
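/*
  Illustrative sketch (not MySQL source): the detection performed by
  has_two_write_locked_tables_with_auto_increment() above -- remember the
  first write-locked table that has an auto_increment column and answer "yes"
  as soon as a second, different one shows up.  Tbl and its flags are
  simplified stand-ins for TABLE_LIST and its lock/auto_increment state.
*/
#include <cassert>
#include <cstring>

struct Tbl
{
  const char *db;
  const char *name;
  bool write_locked;
  bool has_auto_increment;
  Tbl *next;
};

static bool two_write_locked_auto_inc_tables(Tbl *tables)
{
  const char *first_db= nullptr, *first_name= nullptr;
  for (Tbl *t= tables; t; t= t->next)
  {
    if (!t->write_locked || !t->has_auto_increment)
      continue;
    if (!first_name)
    {
      first_db= t->db;
      first_name= t->name;
    }
    else if (std::strcmp(first_db, t->db) || std::strcmp(first_name, t->name))
      return true;           /* two distinct auto_increment tables written to */
  }
  return false;
}

int main()
{
  Tbl t2= { "test", "t2", true, true, nullptr };
  Tbl t1= { "test", "t1", true, true, &t2 };
  assert(two_write_locked_auto_inc_tables(&t1));
  Tbl alone= { "test", "t1", true, true, nullptr };
  assert(!two_write_locked_auto_inc_tables(&alone));
  return 0;
}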
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 6e8a559ee07..e73dd4b1d09 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -814,6 +814,7 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
flags.time_zone= thd->variables.time_zone;
flags.sql_mode= thd->variables.sql_mode;
flags.max_sort_length= thd->variables.max_sort_length;
+ flags.lc_time_names= thd->variables.lc_time_names;
flags.group_concat_max_len= thd->variables.group_concat_max_len;
DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
@@ -1049,6 +1050,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
flags.sql_mode= thd->variables.sql_mode;
flags.max_sort_length= thd->variables.max_sort_length;
flags.group_concat_max_len= thd->variables.group_concat_max_len;
+ flags.lc_time_names= thd->variables.lc_time_names;
DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
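/*
  Illustrative sketch (not MySQL source): why lc_time_names is added to the
  query cache flags above -- two sessions running the same query text under
  different locales must not share a cached result, so the locale has to be
  part of the cache key.  Flags, the Locale enum and the map-based cache are
  simplified stand-ins for Query_cache_query_flags and the real cache.
*/
#include <cassert>
#include <map>
#include <string>
#include <tuple>

enum Locale { en_US, fr_FR };

struct Flags
{
  unsigned sql_mode;
  Locale lc_time_names;                      /* newly part of the key */
  bool operator<(const Flags &o) const
  {
    return std::tie(sql_mode, lc_time_names) <
           std::tie(o.sql_mode, o.lc_time_names);
  }
};

int main()
{
  std::map<std::pair<std::string, Flags>, std::string> cache;
  std::string query= "SELECT MONTHNAME('2007-01-01')";

  std::pair<std::string, Flags> key_en{ query, { 0, en_US } };
  std::pair<std::string, Flags> key_fr{ query, { 0, fr_FR } };
  cache[key_en]= "January";
  cache[key_fr]= "janvier";

  /* Same query text, different locale => different cache entries. */
  assert(cache.at(key_en) == "January");
  assert(cache.at(key_fr) == "janvier");
  return 0;
}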
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index ffaa0b7278c..be8cf76b573 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -208,8 +208,12 @@ THD::THD()
#endif /*HAVE_ROW_BASED_REPLICATION*/
global_read_lock(0), is_fatal_error(0),
rand_used(0), time_zone_used(0),
- last_insert_id_used(0), insert_id_used(0), clear_next_insert_id(0),
+ arg_of_last_insert_id_function(FALSE),
+ first_successful_insert_id_in_prev_stmt(0),
+ first_successful_insert_id_in_prev_stmt_for_binlog(0),
+ first_successful_insert_id_in_cur_stmt(0),
in_lock_tables(0), bootstrap(0), derived_tables_processing(FALSE),
+ stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE),
spcont(NULL)
{
stmt_arena= this;
@@ -224,7 +228,6 @@ THD::THD()
killed= NOT_KILLED;
db_length= col_access=0;
query_error= tmp_table_used= 0;
- next_insert_id=last_insert_id=0;
hash_clear(&handler_tables_hash);
tmp_table=0;
used_tables=0;
@@ -307,6 +310,7 @@ THD::THD()
tablespace_op=FALSE;
ulong tmp=sql_rnd_with_mutex();
randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id);
+ substitute_null_with_insert_id = FALSE;
thr_lock_info_init(&lock_info); /* safety: will be reset after start */
thr_lock_owner_init(&main_lock_id, &lock_info);
}
@@ -347,6 +351,7 @@ void THD::init(void)
reset_current_stmt_binlog_row_based();
#endif /*HAVE_ROW_BASED_REPLICATION*/
bzero((char *) &status_var, sizeof(status_var));
+ variables.lc_time_names = &my_locale_en_US;
}
@@ -628,11 +633,15 @@ bool THD::store_globals()
void THD::cleanup_after_query()
{
- if (clear_next_insert_id)
+ if (first_successful_insert_id_in_cur_stmt > 0)
{
- clear_next_insert_id= 0;
- next_insert_id= 0;
+ /* set what LAST_INSERT_ID() will return */
+ first_successful_insert_id_in_prev_stmt=
+ first_successful_insert_id_in_cur_stmt;
+ first_successful_insert_id_in_cur_stmt= 0;
+ substitute_null_with_insert_id= TRUE;
}
+ arg_of_last_insert_id_function= 0;
/* Free Items that were created during this execution */
free_items();
/* Reset where. */
@@ -1020,7 +1029,7 @@ bool select_send::send_data(List<Item> &items)
Protocol *protocol= thd->protocol;
char buff[MAX_FIELD_WIDTH];
String buffer(buff, sizeof(buff), &my_charset_bin);
- DBUG_ENTER("send_data");
+ DBUG_ENTER("select_send::send_data");
protocol->prepare_for_resend();
Item *item;
@@ -1233,7 +1242,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
bool select_export::send_data(List<Item> &items)
{
- DBUG_ENTER("send_data");
+ DBUG_ENTER("select_export::send_data");
char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH];
bool space_inited=0;
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
@@ -1390,7 +1399,7 @@ bool select_dump::send_data(List<Item> &items)
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
tmp.length(0);
Item *item;
- DBUG_ENTER("send_data");
+ DBUG_ENTER("select_dump::send_data");
if (unit->offset_limit_cnt)
{ // using limit offset,count
@@ -2039,6 +2048,7 @@ void Security_context::init()
{
host= user= priv_user= ip= 0;
host_or_ip= "connecting host";
+ priv_host[0]= '\0';
#ifndef NO_EMBEDDED_ACCESS_CHECKS
db_access= NO_ACCESS;
#endif
@@ -2144,18 +2154,16 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup,
backup->in_sub_stmt= in_sub_stmt;
backup->no_send_ok= net.no_send_ok;
backup->enable_slow_log= enable_slow_log;
- backup->last_insert_id= last_insert_id;
- backup->next_insert_id= next_insert_id;
- backup->current_insert_id= current_insert_id;
- backup->insert_id_used= insert_id_used;
- backup->last_insert_id_used= last_insert_id_used;
- backup->clear_next_insert_id= clear_next_insert_id;
backup->limit_found_rows= limit_found_rows;
backup->examined_row_count= examined_row_count;
backup->sent_row_count= sent_row_count;
backup->cuted_fields= cuted_fields;
backup->client_capabilities= client_capabilities;
backup->savepoints= transaction.savepoints;
+ backup->first_successful_insert_id_in_prev_stmt=
+ first_successful_insert_id_in_prev_stmt;
+ backup->first_successful_insert_id_in_cur_stmt=
+ first_successful_insert_id_in_cur_stmt;
if ((!lex->requires_prelocking() || is_update_query(lex->sql_command)) &&
!current_stmt_binlog_row_based)
@@ -2165,12 +2173,11 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup,
/* Disable result sets */
client_capabilities &= ~CLIENT_MULTI_RESULTS;
in_sub_stmt|= new_state;
- next_insert_id= 0;
- insert_id_used= 0;
examined_row_count= 0;
sent_row_count= 0;
cuted_fields= 0;
transaction.savepoints= 0;
+ first_successful_insert_id_in_cur_stmt= 0;
/* Surpress OK packets in case if we will execute statements */
net.no_send_ok= TRUE;
@@ -2198,12 +2205,10 @@ void THD::restore_sub_statement_state(Sub_statement_state *backup)
in_sub_stmt= backup->in_sub_stmt;
net.no_send_ok= backup->no_send_ok;
enable_slow_log= backup->enable_slow_log;
- last_insert_id= backup->last_insert_id;
- next_insert_id= backup->next_insert_id;
- current_insert_id= backup->current_insert_id;
- insert_id_used= backup->insert_id_used;
- last_insert_id_used= backup->last_insert_id_used;
- clear_next_insert_id= backup->clear_next_insert_id;
+ first_successful_insert_id_in_prev_stmt=
+ backup->first_successful_insert_id_in_prev_stmt;
+ first_successful_insert_id_in_cur_stmt=
+ backup->first_successful_insert_id_in_cur_stmt;
limit_found_rows= backup->limit_found_rows;
sent_row_count= backup->sent_row_count;
client_capabilities= backup->client_capabilities;
@@ -2717,6 +2722,7 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype,
bool is_trans, bool suppress_use)
{
DBUG_ENTER("THD::binlog_query");
+ DBUG_PRINT("enter", ("qtype=%d, query='%s'", qtype, query));
DBUG_ASSERT(query && mysql_bin_log.is_open());
switch (qtype) {
@@ -2784,4 +2790,26 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype,
DBUG_RETURN(0);
}
+bool Discrete_intervals_list::append(ulonglong start, ulonglong val,
+ ulonglong incr)
+{
+ DBUG_ENTER("Discrete_intervals_list::append");
+ /* first, see if this can be merged with previous */
+ if ((head == NULL) || tail->merge_if_contiguous(start, val, incr))
+ {
+ /* it cannot, so need to add a new interval */
+ Discrete_interval *new_interval= new Discrete_interval(start, val, incr);
+ if (unlikely(new_interval == NULL)) // out of memory
+ DBUG_RETURN(1);
+ DBUG_PRINT("info",("adding new auto_increment interval"));
+ if (head == NULL)
+ head= current= new_interval;
+ else
+ tail->next= new_interval;
+ tail= new_interval;
+ elements++;
+ }
+ DBUG_RETURN(0);
+}
+
#endif /* !defined(MYSQL_CLIENT) */
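To make the merge behaviour of Discrete_intervals_list::append() concrete, here is a hedged sketch (it assumes, as the code above implies, that merge_if_contiguous() returns 0 when it managed to extend the tail interval):

    /* Sketch only: back-to-back reservations collapse into one interval,
       a gap starts a new one. */
    Discrete_intervals_list list;
    list.append(1, 3, 1);    /* reserves 1,2,3   -> first interval            */
    list.append(4, 2, 1);    /* reserves 4,5     -> merged, interval is 1..5  */
    list.append(10, 2, 1);   /* reserves 10,11   -> gap, second interval      */
    /* 'elements' is now 2: it counts intervals, not reserved values.         */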
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 5222e75f309..01b28eaee96 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -266,6 +266,9 @@ struct system_variables
CHARSET_INFO *collation_database;
CHARSET_INFO *collation_connection;
+ /* Locale Support */
+ MY_LOCALE *lc_time_names;
+
Time_zone *time_zone;
/* DATE, DATETIME and TIME formats */
@@ -770,12 +773,14 @@ class Sub_statement_state
{
public:
ulonglong options;
- ulonglong last_insert_id, next_insert_id, current_insert_id;
+ ulonglong first_successful_insert_id_in_prev_stmt;
+ ulonglong first_successful_insert_id_in_cur_stmt, insert_id_for_cur_row;
+ Discrete_interval auto_inc_interval_for_cur_row;
ulonglong limit_found_rows;
ha_rows cuted_fields, sent_row_count, examined_row_count;
ulong client_capabilities;
uint in_sub_stmt;
- bool enable_slow_log, insert_id_used, clear_next_insert_id;
+ bool enable_slow_log;
bool last_insert_id_used;
my_bool no_send_ok;
SAVEPOINT *savepoints;
@@ -1071,24 +1076,138 @@ public:
Note: in the parser, stmt_arena == thd, even for PS/SP.
*/
Query_arena *stmt_arena;
+ /* Tells if LAST_INSERT_ID(#) was called for the current statement */
+ bool arg_of_last_insert_id_function;
+ /*
+ ALL OVER THIS FILE, "insert_id" means "*automatically generated* value for
+ insertion into an auto_increment column".
+ */
+ /*
+ This is the first autogenerated insert id which was *successfully*
+ inserted by the previous statement (exactly, if the previous statement
+ didn't successfully insert an autogenerated insert id, then it's the one
+ of the statement before, etc).
+ It can also be set by SET LAST_INSERT_ID=# or SELECT LAST_INSERT_ID(#).
+ It is returned by LAST_INSERT_ID().
+ */
+ ulonglong first_successful_insert_id_in_prev_stmt;
+ /*
+ Variant of the above, used for storing in statement-based binlog. The
+ difference is that the one above can change as the execution of a stored
+ function progresses, while the one below is set once and then does not
+ change (which is the value which statement-based binlog needs).
+ */
+ ulonglong first_successful_insert_id_in_prev_stmt_for_binlog;
+ /*
+ This is the first autogenerated insert id which was *successfully*
+ inserted by the current statement. It is maintained only to set
+ first_successful_insert_id_in_prev_stmt when statement ends.
+ */
+ ulonglong first_successful_insert_id_in_cur_stmt;
+ /*
+ We follow this logic:
+ - when stmt starts, first_successful_insert_id_in_prev_stmt contains the
+ first insert id successfully inserted by the previous stmt.
+ - as stmt makes progress, handler::insert_id_for_cur_row changes; every
+ time get_auto_increment() is called, auto_inc_intervals_for_binlog is
+ augmented with the reserved interval (if statement-based binlogging).
+ - at first successful insertion of an autogenerated value,
+ first_successful_insert_id_in_cur_stmt is set to
+ handler::insert_id_for_cur_row.
+ - when stmt goes to binlog, auto_inc_intervals_for_binlog is
+ binlogged if non-empty.
+ - when stmt ends, first_successful_insert_id_in_prev_stmt is set to
+ first_successful_insert_id_in_cur_stmt.
+ */
+ /*
+ stmt_depends_on_first_successful_insert_id_in_prev_stmt is set when
+ LAST_INSERT_ID() is used by a statement.
+ If it is set, first_successful_insert_id_in_prev_stmt_for_binlog will be
+ stored in the statement-based binlog.
+ This variable is CUMULATIVE along the execution of a stored function or
+ trigger: if one substatement sets it to 1 it will stay 1 until the
+ function/trigger ends, thus making sure that
+ first_successful_insert_id_in_prev_stmt_for_binlog does not change anymore
+ and is propagated to the caller for binlogging.
+ */
+ bool stmt_depends_on_first_successful_insert_id_in_prev_stmt;
/*
- next_insert_id is set on SET INSERT_ID= #. This is used as the next
- generated auto_increment value in handler.cc
+ List of auto_increment intervals reserved by the thread so far, for
+ storage in the statement-based binlog.
+ Note that its minimum is not first_successful_insert_id_in_cur_stmt:
+ assuming a table with an autoinc column, and this happens:
+ INSERT INTO ... VALUES(3);
+ SET INSERT_ID=3; INSERT IGNORE ... VALUES (NULL);
+ then the latter INSERT will insert no rows
+ (first_successful_insert_id_in_cur_stmt == 0), but storing "INSERT_ID=3"
+ in the binlog is still needed; the list's minimum will contain 3.
*/
- ulonglong next_insert_id;
- /* Remember last next_insert_id to reset it if something went wrong */
- ulonglong prev_insert_id;
+ Discrete_intervals_list auto_inc_intervals_in_cur_stmt_for_binlog;
+ /* Used by replication and SET INSERT_ID */
+ Discrete_intervals_list auto_inc_intervals_forced;
/*
- The insert_id used for the last statement or set by SET LAST_INSERT_ID=#
- or SELECT LAST_INSERT_ID(#). Used for binary log and returned by
- LAST_INSERT_ID()
+ There is BUG#19630 where statement-based replication of stored
+ functions/triggers with two auto_increment columns breaks.
+ However, we ensure that it works when there is 0 or 1 auto_increment
+ column; our rules are:
+ a) on the master, while executing a top statement involving substatements,
+ the first top- or sub-statement to generate auto_increment values wins the
+ exclusive right to have its values written to the binlog (the write
+ will be done by the statement or its caller), and the losers won't have
+ their values written to the binlog.
+ b) on the slave, while replicating a top statement involving substatements,
+ the first top- or sub-statement that needs to read auto_increment values
+ from the master's binlog wins the exclusive right to read them (so the
+ losers won't read their values from the binlog but will generate them on
+ their own).
+ a) implies that we mustn't backup/restore
+ auto_inc_intervals_in_cur_stmt_for_binlog.
+ b) implies that we mustn't backup/restore auto_inc_intervals_forced.
+
+ If there is more than one auto_increment column, then intervals for
+ different columns may mix into the
+ auto_inc_intervals_in_cur_stmt_for_binlog list, which is logically wrong,
+ but there is no point in preventing this mixing by keeping intervals
+ from the second inserted column out of the list, as such prevention
+ would be wrong too.
+ What will happen in the case of
+ INSERT INTO t1 (auto_inc) VALUES(NULL);
+ where t1 has a trigger which inserts into an auto_inc column of t2, is
+ that in binlog we'll store the interval of t1 and the interval of t2 (when
+ we store intervals, soon), then in slave, t1 will use both intervals, t2
+ will use none; if t1 inserts the same number of rows as on master,
+ normally the 2nd interval will not be used by t1, which is fine. t2's
+ values will be wrong if t2's internal auto_increment counter is different
+ from what it was on master (which is likely). In 5.1, in mixed binlogging
+ mode, row-based binlogging is used for such cases where two
+ auto_increment columns are inserted.
*/
- ulonglong last_insert_id;
+ inline void record_first_successful_insert_id_in_cur_stmt(ulonglong id)
+ {
+ if (first_successful_insert_id_in_cur_stmt == 0)
+ first_successful_insert_id_in_cur_stmt= id;
+ }
+ inline ulonglong read_first_successful_insert_id_in_prev_stmt(void)
+ {
+ if (!stmt_depends_on_first_successful_insert_id_in_prev_stmt)
+ {
+ /* It's the first time we read it */
+ first_successful_insert_id_in_prev_stmt_for_binlog=
+ first_successful_insert_id_in_prev_stmt;
+ stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1;
+ }
+ return first_successful_insert_id_in_prev_stmt;
+ }
/*
- Set to the first value that LAST_INSERT_ID() returned for the last
- statement. When this is set, last_insert_id_used is set to true.
+ Used by Intvar_log_event::exec_event() and by "SET INSERT_ID=#"
+ (mysqlbinlog). We'll soon add a variant which can take many intervals as
+ arguments.
*/
- ulonglong current_insert_id;
+ inline void force_one_auto_inc_interval(ulonglong next_id)
+ {
+ auto_inc_intervals_forced.empty(); // in case of multiple SET INSERT_ID
+ auto_inc_intervals_forced.append(next_id, ULONGLONG_MAX, 0);
+ }
+
ulonglong limit_found_rows;
ulonglong options; /* Bitmap of states */
longlong row_count_func; /* For the ROW_COUNT() function */
@@ -1135,7 +1254,7 @@ public:
uint tmp_table, global_read_lock;
uint server_status,open_options;
enum enum_thread_type system_thread;
- uint32 db_length;
+ uint db_length;
uint select_number; //number of select (used for EXPLAIN)
/* variables.transaction_isolation is reset to this after each commit */
enum_tx_isolation session_tx_isolation;
@@ -1157,7 +1276,8 @@ public:
bool last_cuted_field;
bool no_errors, password, is_fatal_error;
bool query_start_used, rand_used, time_zone_used;
- bool last_insert_id_used,insert_id_used, clear_next_insert_id;
+ /* for IS NULL => = last_insert_id() fix in remove_eq_conds() */
+ bool substitute_null_with_insert_id;
bool in_lock_tables;
bool query_error, bootstrap, cleanup_done;
bool tmp_table_used;
@@ -1185,9 +1305,10 @@ public:
/* Used by the sys_var class to store temporary values */
union
{
- my_bool my_bool_value;
- long long_value;
- ulong ulong_value;
+ my_bool my_bool_value;
+ long long_value;
+ ulong ulong_value;
+ ulonglong ulonglong_value;
} sys_var_tmp;
struct {
@@ -1288,20 +1409,6 @@ public:
inline void end_time() { time(&start_time); }
inline void set_time(time_t t) { time_after_lock=start_time=user_time=t; }
inline void lock_time() { time(&time_after_lock); }
- inline void insert_id(ulonglong id_arg)
- {
- last_insert_id= id_arg;
- insert_id_used=1;
- }
- inline ulonglong insert_id(void)
- {
- if (!last_insert_id_used)
- {
- last_insert_id_used=1;
- current_insert_id=last_insert_id;
- }
- return last_insert_id;
- }
inline ulonglong found_rows(void)
{
return limit_found_rows;
@@ -1418,7 +1525,17 @@ public:
inline void set_current_stmt_binlog_row_based_if_mixed()
{
#ifdef HAVE_ROW_BASED_REPLICATION
- if (variables.binlog_format == BINLOG_FORMAT_MIXED)
+ /*
+ If in a stored function or trigger, the caller should already have done the
+ change. We test in_sub_stmt to prevent introducing bugs where people
+ wouldn't ensure that, and would switch to row-based mode in the middle
+ of executing a stored function/trigger (which is too late, see also
+ reset_current_stmt_binlog_row_based()); this condition will make their
+ tests fail and so force them to propagate the
+ lex->binlog_row_based_if_mixed upwards to the caller.
+ */
+ if ((variables.binlog_format == BINLOG_FORMAT_MIXED) &&
+ (in_sub_stmt == 0))
current_stmt_binlog_row_based= TRUE;
#endif
}
@@ -1437,12 +1554,72 @@ public:
inline void reset_current_stmt_binlog_row_based()
{
#ifdef HAVE_ROW_BASED_REPLICATION
- current_stmt_binlog_row_based=
- test(variables.binlog_format == BINLOG_FORMAT_ROW);
+ /*
+ If there are temporary tables, don't reset back to
+ statement-based. Indeed it could be that:
+ CREATE TEMPORARY TABLE t SELECT UUID(); # row-based
+ # and row-based does not store updates to temp tables
+ # in the binlog.
+ INSERT INTO u SELECT * FROM t; # stmt-based
+ and then the INSERT will fail as data inserted into t was not logged.
+ So we continue with row-based until the temp table is dropped.
+ If we are in a stored function or trigger, we mustn't reset in the
+ middle of its execution (as the binary logging way of a stored function
+ or trigger is decided when it starts executing, depending for example on
+ the caller (for a stored function: if caller is SELECT or
+ INSERT/UPDATE/DELETE...).
+ */
+ if ((temporary_tables == NULL) && (in_sub_stmt == 0))
+ {
+ current_stmt_binlog_row_based=
+ test(variables.binlog_format == BINLOG_FORMAT_ROW);
+ }
#else
current_stmt_binlog_row_based= FALSE;
#endif
}
+
+ /*
+ Initialize the current database from a NULL-terminated string of the given length.
+ If we run out of memory, we free the current database and return TRUE.
+ This way the user will notice the error as there will be no current
+ database selected (in addition to the error message set by malloc).
+ */
+ bool set_db(const char *new_db, uint new_db_len)
+ {
+ /* Do not reallocate memory if current chunk is big enough. */
+ if (db && new_db && db_length >= new_db_len)
+ memcpy(db, new_db, new_db_len+1);
+ else
+ {
+ x_free(db);
+ db= new_db ? my_strndup(new_db, new_db_len, MYF(MY_WME)) : NULL;
+ }
+ db_length= db ? new_db_len : 0;
+ return new_db && !db;
+ }
+ void reset_db(char *new_db, uint new_db_len)
+ {
+ db= new_db;
+ db_length= new_db_len;
+ }
+ /*
+ Copy the current database to the argument. Use the current arena to
+ allocate memory for a deep copy: current database may be freed after
+ a statement is parsed but before it's executed.
+ */
+ bool copy_db_to(char **p_db, uint *p_db_length)
+ {
+ if (db == NULL)
+ {
+ my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+ return TRUE;
+ }
+ *p_db= strmake(db, db_length);
+ if (p_db_length)
+ *p_db_length= db_length;
+ return FALSE;
+ }
};
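A hedged usage sketch of the new database helpers (example_switch_and_copy() is illustrative only): set_db() keeps its own heap copy of the name, while copy_db_to() hands the caller a duplicate allocated on the statement arena.

    /* Sketch: not part of the patch. */
    static bool example_switch_and_copy(THD *thd)
    {
      if (thd->set_db("test", 4))          /* heap copy; TRUE only on OOM     */
        return TRUE;
      char *db;
      uint  db_len;
      if (thd->copy_db_to(&db, &db_len))   /* arena copy; TRUE if no db set   */
        return TRUE;
      /* 'db' stays valid for the statement even if thd->db changes later.    */
      return FALSE;
    }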
@@ -1589,7 +1766,7 @@ class select_insert :public select_result_interceptor {
TABLE_LIST *table_list;
TABLE *table;
List<Item> *fields;
- ulonglong last_insert_id;
+ ulonglong autoinc_value_of_last_inserted_row; // autogenerated or not
COPY_INFO info;
bool insert_into_view;
@@ -1637,7 +1814,8 @@ public:
virtual bool can_rollback_data() { return 1; }
// Needed for access from local class MY_HOOKS in prepare(), since thd is proteted.
- THD *get_thd(void) { return thd; }
+ const THD *get_thd(void) { return thd; }
+ const HA_CREATE_INFO *get_create_info() { return create_info; };
};
#include <myisam.h>
@@ -1790,7 +1968,7 @@ typedef struct st_sort_buffer {
class Table_ident :public Sql_alloc
{
- public:
+public:
LEX_STRING db;
LEX_STRING table;
SELECT_LEX_UNIT *sel;
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index bcd1b99b91a..77d99862bf0 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -906,33 +906,13 @@ exit:
(void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */
error= Events::drop_schema_events(thd, db);
/*
- If this database was the client's selected database, we silently change the
- client's selected database to nothing (to have an empty SELECT DATABASE()
- in the future). For this we free() thd->db and set it to 0. But we don't do
- free() for the slave thread. Indeed, doing a x_free() on it leads to nasty
- problems (i.e. long painful debugging) because in this thread, thd->db is
- the same as data_buf and db of the Query_log_event which is dropping the
- database. So if you free() thd->db, you're freeing data_buf. You set
- thd->db to 0 but not data_buf (thd->db and data_buf are two distinct
- pointers which point to the same place). Then in ~Query_log_event(), we
- have 'if (data_buf) free(data_buf)' data_buf is !=0 so this makes a
- DOUBLE free().
- Side effects of this double free() are, randomly (depends on the machine),
- when the slave is replicating a DROP DATABASE:
- - garbage characters in the error message:
- "Error 'Can't drop database 'test2'; database doesn't exist' on query
- 'h4zI©'"
- - segfault
- - hang in "free(vio)" (yes!) in the I/O or SQL slave threads (so slave
- server hangs at shutdown etc).
+ If this database was the client's selected database, we silently
+ change the client's selected database to nothing (to have an empty
+ SELECT DATABASE() in the future). For this we free() thd->db and set
+ it to 0.
*/
if (thd->db && !strcmp(thd->db, db))
- {
- if (!(thd->slave_thread)) /* a slave thread will free it itself */
- x_free(thd->db);
- thd->db= 0;
- thd->db_length= 0;
- }
+ thd->set_db(NULL, 0);
VOID(pthread_mutex_unlock(&LOCK_mysql_create_db));
start_waiting_global_read_lock(thd);
exit2:
@@ -1228,38 +1208,52 @@ err:
/*
- Change default database.
+ Change the current database.
SYNOPSIS
mysql_change_db()
- thd Thread handler
- name Databasename
- no_access_check True don't do access check. In this case name may be ""
+ thd thread handle
+ name database name
+ no_access_check if TRUE, don't do access check. In this
+ case name may be ""
DESCRIPTION
- Becasue the database name may have been given directly from the
- communication packet (in case of 'connect' or 'COM_INIT_DB')
- we have to do end space removal in this function.
+ Check that the database name corresponds to a valid and
+ existent database, check access rights (unless called with
+ no_access_check), and set the current database. This function
+ is called to change the current database upon user request
+ (COM_CHANGE_DB command) or temporarily, to execute a stored
+ routine.
NOTES
- Do as little as possible in this function, as it is not called for the
- replication slave SQL thread (for that thread, setting of thd->db is done
- in ::exec_event() methods of log_event.cc).
-
- This function does not send anything, including error messages to the
- client, if that should be sent to the client, call net_send_error after
- this function.
+ This function is not the only way to switch the database that
+ is currently employed. When the replication slave thread
+ switches the database before executing a query, it calls
+ thd->set_db directly. However, if the query, in turn, uses
+ a stored routine, the stored routine will use this function,
+ even if it's run on the slave.
+
+ This function allocates the name of the database on the system
+ heap: this is necessary to be able to uniformly change the
+ database from any module of the server. Up to 5.0 different
+ modules were using different memory to store the name of the
+ database, and this led to memory corruption: a stack pointer
+ set by Stored Procedures was used by replication after the
+ stack address was long gone.
+
+ This function does not send anything, including error
+ messages, to the client. If that should be sent to the client,
+ call net_send_error after this function.
RETURN VALUES
- 0 ok
+ 0 OK
1 error
*/
bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
{
- int length, db_length;
- char *dbname= thd->slave_thread ? (char *) name :
- my_strdup((char *) name, MYF(MY_WME));
+ int path_length, db_length;
+ char *db_name;
char path[FN_REFLEN];
HA_CREATE_INFO create;
bool system_db= 0;
@@ -1271,32 +1265,35 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
DBUG_ENTER("mysql_change_db");
DBUG_PRINT("enter",("name: '%s'",name));
- LINT_INIT(db_length);
-
- /* dbname can only be NULL if malloc failed */
- if (!dbname || !(db_length= strlen(dbname)))
+ if (name == NULL || name[0] == '\0' && no_access_check == FALSE)
{
- if (no_access_check && dbname)
- {
- /* Called from SP when orignal database was not set */
- system_db= 1;
- goto end;
- }
- if (!(thd->slave_thread))
- x_free(dbname); /* purecov: inspected */
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR),
- MYF(0)); /* purecov: inspected */
+ my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
DBUG_RETURN(1); /* purecov: inspected */
}
- if (check_db_name(dbname))
+ else if (name[0] == '\0')
+ {
+ /* Called from SP to restore the original database, which was NULL */
+ DBUG_ASSERT(no_access_check);
+ system_db= 1;
+ db_name= NULL;
+ db_length= 0;
+ goto end;
+ }
+ /*
+ Now we need to make a copy because check_db_name requires a
+ non-constant argument. TODO: fix check_db_name.
+ */
+ if ((db_name= my_strdup(name, MYF(MY_WME))) == NULL)
+ DBUG_RETURN(1); /* the error is set */
+ db_length= strlen(db_name);
+ if (check_db_name(db_name))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), dbname);
- if (!(thd->slave_thread))
- my_free(dbname, MYF(0));
+ my_error(ER_WRONG_DB_NAME, MYF(0), db_name);
+ my_free(db_name, MYF(0));
DBUG_RETURN(1);
}
- DBUG_PRINT("info",("Use database: %s", dbname));
- if (!my_strcasecmp(system_charset_info, dbname, information_schema_name.str))
+ DBUG_PRINT("info",("Use database: %s", db_name));
+ if (!my_strcasecmp(system_charset_info, db_name, information_schema_name.str))
{
system_db= 1;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -1311,48 +1308,35 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
if (test_all_bits(sctx->master_access, DB_ACLS))
db_access=DB_ACLS;
else
- db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, dbname, 0) |
+ db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name, 0) |
sctx->master_access);
if (!(db_access & DB_ACLS) && (!grant_option ||
- check_grant_db(thd,dbname)))
+ check_grant_db(thd,db_name)))
{
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user,
sctx->priv_host,
- dbname);
+ db_name);
general_log_print(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR),
- sctx->priv_user, sctx->priv_host, dbname);
- if (!(thd->slave_thread))
- my_free(dbname,MYF(0));
+ sctx->priv_user, sctx->priv_host, db_name);
+ my_free(db_name,MYF(0));
DBUG_RETURN(1);
}
}
#endif
- length= build_table_filename(path, sizeof(path), dbname, "", "");
- if (length && path[length-1] == FN_LIBCHAR)
- path[length-1]=0; // remove ending '\'
+ path_length= build_table_filename(path, sizeof(path), db_name, "", "");
+ if (path_length && path[path_length-1] == FN_LIBCHAR)
+ path[path_length-1]= '\0'; // remove ending '\'
if (my_access(path,F_OK))
{
- my_error(ER_BAD_DB_ERROR, MYF(0), dbname);
- if (!(thd->slave_thread))
- my_free(dbname,MYF(0));
+ my_error(ER_BAD_DB_ERROR, MYF(0), db_name);
+ my_free(db_name, MYF(0));
DBUG_RETURN(1);
}
end:
- if (!(thd->slave_thread))
- x_free(thd->db);
- if (dbname && dbname[0] == 0)
- {
- if (!(thd->slave_thread))
- my_free(dbname, MYF(0));
- thd->db= NULL;
- thd->db_length= 0;
- }
- else
- {
- thd->db= dbname; // THD::~THD will free this
- thd->db_length= db_length;
- }
+ x_free(thd->db);
+ DBUG_ASSERT(db_name == NULL || db_name[0] != '\0');
+ thd->reset_db(db_name, db_length); // THD::~THD will free this
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (!no_access_check)
sctx->db_access= db_access;
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index b9ce1a53aaf..659695e8e73 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -428,7 +428,7 @@ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b)
bool mysql_multi_delete_prepare(THD *thd)
{
LEX *lex= thd->lex;
- TABLE_LIST *aux_tables= (TABLE_LIST *)lex->auxilliary_table_list.first;
+ TABLE_LIST *aux_tables= (TABLE_LIST *)lex->auxiliary_table_list.first;
TABLE_LIST *target_tbl;
DBUG_ENTER("mysql_multi_delete_prepare");
@@ -986,12 +986,12 @@ trunc_by_del:
thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT);
ha_enable_transaction(thd, FALSE);
mysql_init_select(thd->lex);
-#ifdef HAVE_ROW_BASED_REPLICATION
+ bool save_binlog_row_based= thd->current_stmt_binlog_row_based;
thd->clear_current_stmt_binlog_row_based();
-#endif
error= mysql_delete(thd, table_list, (COND*) 0, (SQL_LIST*) 0,
HA_POS_ERROR, LL(0), TRUE);
ha_enable_transaction(thd, TRUE);
thd->options= save_options;
+ thd->current_stmt_binlog_row_based= save_binlog_row_based;
DBUG_RETURN(error);
}
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index bf035401bea..0d893a6c9be 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -254,7 +254,8 @@ err:
DESCRIPTION
Though this function takes a list of tables, only the first list entry
- will be closed. Broadcasts a COND_refresh condition.
+ will be closed.
+ Broadcasts refresh if it closed the table.
RETURN
FALSE ok
@@ -291,7 +292,7 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables)
if (close_thread_table(thd, table_ptr))
{
/* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
VOID(pthread_mutex_unlock(&LOCK_open));
}
@@ -615,7 +616,7 @@ err0:
tables are closed (if MYSQL_HA_FLUSH_ALL) is set.
If 'tables' is NULL and MYSQL_HA_FLUSH_ALL is not set,
all HANDLER tables marked for flush are closed.
- Broadcasts a COND_refresh condition, for every table closed.
+ Broadcasts refresh for every table closed.
NOTE
Since mysql_ha_flush() is called when the base table has to be closed,
@@ -712,7 +713,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags,
MYSQL_HA_REOPEN_ON_USAGE mark for reopen.
DESCRIPTION
- Broadcasts a COND_refresh condition, for every table closed.
+ Broadcasts refresh if it closed the table.
The caller must lock LOCK_open.
RETURN
@@ -750,7 +751,7 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags)
if (close_thread_table(thd, table_ptr))
{
/* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
DBUG_RETURN(0);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 7b017ad7317..fae79ba58c5 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -17,6 +17,44 @@
/* Insert of records */
+/*
+ INSERT DELAYED
+
+ Insert delayed is distinguished from a normal insert by lock_type ==
+ TL_WRITE_DELAYED instead of TL_WRITE. It first tries to open a
+ "delayed" table (delayed_get_table()), but falls back to
+ open_and_lock_tables() on error and proceeds as normal insert then.
+
+ Opening a "delayed" table means to find a delayed insert thread that
+ has the table open already. If this fails, a new thread is created, and
+ we wait for it to open and lock the table.
+
+ If accessing the thread succeeded, in
+ delayed_insert::get_local_table() the table of the thread is copied
+ for local use. A copy is required because the normal insert logic
+ works on a target table, but the other threads table object must not
+ be used. The insert logic uses the record buffer to create a record.
+ And the delayed insert thread uses the record buffer to pass the
+ record to the table handler. So there must be different objects. Also
+ the copied table is not included in the lock, so that the statement
+ can proceed even if the real table cannot be accessed at this moment.
+
+ Copying a table object is not a trivial operation. Besides the TABLE
+ object there are the field pointer array, the field objects and the
+ record buffer. After copying the field objects, their pointers into
+ the record must be "moved" to point to the new record buffer.
+
+ After this setup the normal insert logic is used. Only that for
+ delayed inserts write_delayed() is called instead of write_record().
+ It inserts the rows into a queue and signals the delayed insert thread
+ instead of writing directly to the table.
+
+ The delayed insert thread awakes from the signal. It locks the table,
+ inserts the rows from the queue, unlocks the table, and waits for the
+ next signal. It normally lives until a FLUSH TABLES or SHUTDOWN.
+
+*/
+
#include "mysql_priv.h"
#include "sp_head.h"
#include "sql_trigger.h"
@@ -26,8 +64,8 @@
static int check_null_fields(THD *thd,TABLE *entry);
#ifndef EMBEDDED_LIBRARY
static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list);
-static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, bool ignore,
- char *query, uint query_length, bool log_on);
+static int write_delayed(THD *thd, TABLE *table, enum_duplicates dup,
+ LEX_STRING query, bool ignore, bool log_on);
static void end_delayed_insert(THD *thd);
pthread_handler_t handle_delayed_insert(void *arg);
static void unlink_blobs(register TABLE *table);
@@ -181,9 +219,6 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
}
}
}
- if (table->found_next_number_field)
- table->mark_auto_increment_column();
- table->mark_columns_needed_for_insert();
// For the values we need select_priv
#ifndef NO_EMBEDDED_ACCESS_CHECKS
table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege);
@@ -312,9 +347,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
{
if (thd->locked_tables)
{
- if (find_locked_table(thd,
- table_list->db ? table_list->db : thd->db,
- table_list->table_name))
+ DBUG_ASSERT(table_list->db); /* Must be set in the parser */
+ if (find_locked_table(thd, table_list->db, table_list->table_name))
{
my_error(ER_DELAYED_INSERT_TABLE_LOCKED, MYF(0),
table_list->table_name);
@@ -411,10 +445,12 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
table->next_number_field=table->found_next_number_field;
error=0;
- id=0;
thd->proc_info="update";
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ if (duplic == DUP_REPLACE &&
+ (!table->triggers || !table->triggers->has_delete_triggers()))
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
/*
let's *try* to start bulk inserts. It won't necessary
start them as values_list.elements should be greater than
@@ -443,6 +479,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error= 1;
}
+ table->mark_columns_needed_for_insert();
+
if (table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd))
error= 1;
@@ -511,22 +549,13 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
#ifndef EMBEDDED_LIBRARY
if (lock_type == TL_WRITE_DELAYED)
{
- error=write_delayed(thd, table, duplic, ignore, query, thd->query_length, log_on);
+ LEX_STRING const st_query = { query, thd->query_length };
+ error=write_delayed(thd, table, duplic, st_query, ignore, log_on);
query=0;
}
else
#endif
error=write_record(thd, table ,&info);
- /*
- If auto_increment values are used, save the first one
- for LAST_INSERT_ID() and for the update log.
- We can't use insert_id() as we don't want to touch the
- last_insert_id_used flag.
- */
- if (! id && thd->insert_id_used)
- { // Get auto increment value
- id= thd->last_insert_id;
- }
if (error)
break;
thd->row_count++;
@@ -534,6 +563,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
free_underlaid_joins(thd, &thd->lex->select_lex);
joins_freed= TRUE;
+ table->file->ha_release_auto_increment();
/*
Now all rows are inserted. Time to update logs and sends response to
@@ -544,7 +574,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
{
if (!error)
{
- id=0; // No auto_increment id
info.copied=values_list.elements;
end_delayed_insert(thd);
}
@@ -558,11 +587,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
table->file->print_error(my_errno,MYF(0));
error=1;
}
- if (id && values_list.elements != 1)
- thd->insert_id(id); // For update log
- else if (table->next_number_field && info.copied)
- id=table->next_number_field->val_int(); // Return auto_increment value
-
transactional_table= table->file->has_transactions();
if ((changed= (info.copied || info.deleted || info.updated)))
@@ -611,18 +635,30 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
}
}
thd->proc_info="end";
+ /*
+ We'll report this id to the client:
+ - if the table contains an autoincrement column and we successfully
+ inserted an autogenerated value, the autogenerated value.
+ - if the table contains no autoincrement column and LAST_INSERT_ID(X) was
+ called, X.
+ - if the table contains an autoincrement column, and some rows were
+ inserted, the id of the last "inserted" row (if IGNORE, that value may not
+ have been really inserted but ignored).
+ */
+ id= (thd->first_successful_insert_id_in_cur_stmt > 0) ?
+ thd->first_successful_insert_id_in_cur_stmt :
+ (thd->arg_of_last_insert_id_function ?
+ thd->first_successful_insert_id_in_prev_stmt :
+ ((table->next_number_field && info.copied) ?
+ table->next_number_field->val_int() : 0));
table->next_number_field=0;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
- thd->next_insert_id=0; // Reset this if wrongly used
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ if (duplic == DUP_REPLACE &&
+ (!table->triggers || !table->triggers->has_delete_triggers()))
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
- /* Reset value of LAST_INSERT_ID if no rows where inserted */
- if (!info.copied && thd->insert_id_used)
- {
- thd->insert_id(0);
- id=0;
- }
if (error)
goto abort;
if (values_list.elements == 1 && (!(thd->options & OPTION_WARNINGS) ||
@@ -644,8 +680,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->row_count_func= info.copied+info.deleted+info.updated;
::send_ok(thd, (ulong) thd->row_count_func, id, buff);
}
- if (table != NULL)
- table->file->release_auto_increment();
thd->abort_on_warning= 0;
DBUG_RETURN(FALSE);
@@ -655,7 +689,7 @@ abort:
end_delayed_insert(thd);
#endif
if (table != NULL)
- table->file->release_auto_increment();
+ table->file->ha_release_auto_increment();
if (!joins_freed)
free_underlaid_joins(thd, &thd->lex->select_lex);
thd->abort_on_warning= 0;
@@ -696,6 +730,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
uint used_fields_buff_size= bitmap_buffer_size(table->s->fields);
uint32 *used_fields_buff= (uint32*)thd->alloc(used_fields_buff_size);
MY_BITMAP used_fields;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
DBUG_ENTER("check_key_in_view");
if (!used_fields_buff)
@@ -707,15 +742,26 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
bitmap_clear_all(&used_fields);
view->contain_auto_increment= 0;
+ /*
+ we must not set query_id for fields as they're not
+ really used in this context
+ */
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
/* check simplicity and prepare unique test of view */
for (trans= trans_start; trans != trans_end; trans++)
{
if (!trans->item->fixed && trans->item->fix_fields(thd, &trans->item))
- return TRUE;
+ {
+ thd->mark_used_columns= save_mark_used_columns;
+ DBUG_RETURN(TRUE);
+ }
Item_field *field;
/* simple SELECT list entry (field without expression) */
if (!(field= trans->item->filed_for_view_update()))
+ {
+ thd->mark_used_columns= save_mark_used_columns;
DBUG_RETURN(TRUE);
+ }
if (field->field->unireg_check == Field::NEXT_NUMBER)
view->contain_auto_increment= 1;
/* prepare unique test */
@@ -725,6 +771,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
*/
trans->item= field;
}
+ thd->mark_used_columns= save_mark_used_columns;
/* unique test */
for (trans= trans_start; trans != trans_end; trans++)
{
@@ -964,6 +1011,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
int error, trg_error= 0;
char *key=0;
MY_BITMAP *save_read_set, *save_write_set;
+ ulonglong prev_insert_id= table->file->next_insert_id;
+ ulonglong insert_id_for_cur_row= 0;
DBUG_ENTER("write_record");
info->records++;
@@ -976,12 +1025,35 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
while ((error=table->file->ha_write_row(table->record[0])))
{
uint key_nr;
- if (error != HA_WRITE_SKIP)
+ /*
+ If we do more than one iteration of this loop, from the second one the
+ row will have an explicit value in the autoinc field, which was set at
+ the first call of handler::update_auto_increment(). So we must save
+ the autogenerated value so that thd->insert_id_for_cur_row does not
+ become 0.
+ */
+ if (table->file->insert_id_for_cur_row > 0)
+ insert_id_for_cur_row= table->file->insert_id_for_cur_row;
+ else
+ table->file->insert_id_for_cur_row= insert_id_for_cur_row;
+ bool is_duplicate_key_error;
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP))
goto err;
- table->file->restore_auto_increment(); // it's too early here! BUG#20188
+ is_duplicate_key_error= table->file->is_fatal_error(error, 0);
+ if (!is_duplicate_key_error)
+ {
+ /*
+ We come here when we had an ignorable error which is not a duplicate
+ key error. In this case we ignore the error if the ignore flag is set,
+ otherwise we report the error as usual. We will not do any duplicate key
+ processing.
+ */
+ if (info->ignore)
+ goto ok_or_after_trg_err; /* Ignoring a non-fatal error, return 0 */
+ goto err;
+ }
if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
{
- error=HA_WRITE_SKIP; /* Database can't find key */
+ error= HA_ERR_FOUND_DUPP_KEY; /* Database can't find key */
goto err;
}
/* Read all columns for the row we are going to replace */
@@ -994,7 +1066,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
if (info->handle_duplicates == DUP_REPLACE &&
table->next_number_field &&
key_nr == table->s->next_number_index &&
- table->file->auto_increment_column_changed)
+ (insert_id_for_cur_row > 0))
goto err;
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
@@ -1053,21 +1125,29 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
if (res == VIEW_CHECK_ERROR)
goto before_trg_err;
- if (thd->clear_next_insert_id)
- {
- /* Reset auto-increment cacheing if we do an update */
- thd->clear_next_insert_id= 0;
- thd->next_insert_id= 0;
- }
if ((error=table->file->ha_update_row(table->record[1],
table->record[0])))
{
- if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore)
+ if (info->ignore &&
+ !table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
+ {
+ table->file->restore_auto_increment(prev_insert_id);
goto ok_or_after_trg_err;
+ }
goto err;
}
info->updated++;
-
+ /*
+ If ON DUP KEY UPDATE updates a row instead of inserting one, and
+ there is an auto_increment column, then SELECT LAST_INSERT_ID()
+ returns the id of the updated row:
+ */
+ if (table->next_number_field)
+ {
+ longlong field_val= table->next_number_field->val_int();
+ thd->record_first_successful_insert_id_in_cur_stmt(field_val);
+ table->file->adjust_next_insert_id_after_explicit_value(field_val);
+ }
trg_error= (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, TRUE));
@@ -1096,16 +1176,11 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH) &&
(!table->triggers || !table->triggers->has_delete_triggers()))
{
- if (thd->clear_next_insert_id)
- {
- /* Reset auto-increment cacheing if we do an update */
- thd->clear_next_insert_id= 0;
- thd->next_insert_id= 0;
- }
if ((error=table->file->ha_update_row(table->record[1],
table->record[0])))
goto err;
info->deleted++;
+ thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
/*
Since we pretend that we have done insert we should call
its after triggers.
@@ -1134,6 +1209,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
}
}
}
+ thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
/*
Restore column maps if they where replaced during an duplicate key
problem.
@@ -1145,14 +1221,15 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
else if ((error=table->file->ha_write_row(table->record[0])))
{
if (!info->ignore ||
- (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE))
+ table->file->is_fatal_error(error, HA_CHECK_DUP))
goto err;
- table->file->restore_auto_increment();
+ table->file->restore_auto_increment(prev_insert_id);
goto ok_or_after_trg_err;
}
after_trg_n_copied_inc:
info->copied++;
+ thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
trg_error= (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
TRG_ACTION_AFTER, TRUE));
@@ -1172,6 +1249,7 @@ err:
table->file->print_error(error,MYF(0));
before_trg_err:
+ table->file->restore_auto_increment(prev_insert_id);
if (key)
my_safe_afree(key, table->s->max_unique_length, MAX_KEY_LENGTH);
table->column_bitmaps_set(save_read_set, save_write_set);
@@ -1234,14 +1312,20 @@ public:
char *record;
enum_duplicates dup;
time_t start_time;
- bool query_start_used,last_insert_id_used,insert_id_used, ignore, log_query;
- ulonglong last_insert_id;
+ bool query_start_used, ignore, log_query;
+ bool stmt_depends_on_first_successful_insert_id_in_prev_stmt;
+ ulonglong first_successful_insert_id_in_prev_stmt;
timestamp_auto_set_type timestamp_field_type;
+ LEX_STRING query;
- delayed_row(enum_duplicates dup_arg, bool ignore_arg, bool log_query_arg)
- :record(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg) {}
+ delayed_row(LEX_STRING const query_arg, enum_duplicates dup_arg,
+ bool ignore_arg, bool log_query_arg)
+ : record(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg),
+ query(query_arg)
+ {}
~delayed_row()
{
+ x_free(query.str);
x_free(record);
}
};
@@ -1249,9 +1333,6 @@ public:
class delayed_insert :public ilink {
uint locks_in_memory;
- char *query;
- ulong query_length;
- ulong query_allocated;
public:
THD thd;
TABLE *table;
@@ -1265,7 +1346,7 @@ public:
TABLE_LIST table_list; // Argument
delayed_insert()
- :locks_in_memory(0), query(0), query_length(0), query_allocated(0),
+ :locks_in_memory(0),
table(0),tables_in_use(0),stacked_inserts(0), status(0), dead(0),
group_count(0)
{
@@ -1276,6 +1357,11 @@ public:
thd.command=COM_DELAYED_INSERT;
thd.lex->current_select= 0; // for my_message_sql
thd.lex->sql_command= SQLCOM_INSERT; // For innodb::store_lock()
+ /*
+ Statement-based replication of INSERT DELAYED has problems with RAND()
+ and user vars, so in mixed mode we go to row-based.
+ */
+ thd.set_current_stmt_binlog_row_based_if_mixed();
bzero((char*) &thd.net, sizeof(thd.net)); // Safety
bzero((char*) &table_list, sizeof(table_list)); // Safety
@@ -1291,7 +1377,6 @@ public:
}
~delayed_insert()
{
- my_free(query, MYF(MY_WME|MY_ALLOW_ZERO_PTR));
/* The following is not really needed, but just for safety */
delayed_row *row;
while ((row=rows.get()))
@@ -1311,25 +1396,6 @@ public:
VOID(pthread_cond_broadcast(&COND_thread_count)); /* Tell main we are ready */
}
- int set_query(char const *q, ulong qlen) {
- if (q && qlen > 0)
- {
- if (query_allocated < qlen + 1)
- {
- ulong const flags(MY_WME|MY_FREE_ON_ERROR|MY_ALLOW_ZERO_PTR);
- query= my_realloc(query, qlen + 1, MYF(flags));
- if (query == 0)
- return HA_ERR_OUT_OF_MEM;
- query_allocated= qlen;
- }
- query_length= qlen;
- memcpy(query, q, qlen + 1);
- }
- else
- query_length= 0;
- return 0;
- }
-
/* The following is for checking when we can delete ourselves */
inline void lock()
{
@@ -1387,8 +1453,8 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
TABLE *table;
DBUG_ENTER("delayed_get_table");
- if (!table_list->db)
- table_list->db=thd->db;
+ /* Must be set in the parser */
+ DBUG_ASSERT(table_list->db);
/* Find the thread which handles this table. */
if (!(tmp=find_handler(thd,table_list)))
@@ -1407,18 +1473,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
*/
if (! (tmp= find_handler(thd, table_list)))
{
- /*
- Avoid that a global read lock steps in while we are creating the
- new thread. It would block trying to open the table. Hence, the
- DI thread and this thread would wait until after the global
- readlock is gone. Since the insert thread needs to wait for a
- global read lock anyway, we do it right now. Note that
- wait_if_global_read_lock() sets a protection against a new
- global read lock when it succeeds. This needs to be released by
- start_waiting_global_read_lock().
- */
- if (wait_if_global_read_lock(thd, 0, 1))
- goto err;
if (!(tmp=new delayed_insert()))
{
my_error(ER_OUTOFMEMORY,MYF(0),sizeof(delayed_insert));
@@ -1427,15 +1481,15 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
pthread_mutex_lock(&LOCK_thread_count);
thread_count++;
pthread_mutex_unlock(&LOCK_thread_count);
- if (!(tmp->thd.db=my_strdup(table_list->db,MYF(MY_WME))) ||
- !(tmp->thd.query=my_strdup(table_list->table_name,MYF(MY_WME))))
+ tmp->thd.set_db(table_list->db, strlen(table_list->db));
+ tmp->thd.query= my_strdup(table_list->table_name,MYF(MY_WME));
+ if (tmp->thd.db == NULL || tmp->thd.query == NULL)
{
delete tmp;
my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
goto err1;
}
tmp->table_list= *table_list; // Needed to open table
- tmp->table_list.db= tmp->thd.db;
tmp->table_list.alias= tmp->table_list.table_name= tmp->thd.query;
tmp->lock();
pthread_mutex_lock(&tmp->mutex);
@@ -1459,11 +1513,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
pthread_cond_wait(&tmp->cond_client,&tmp->mutex);
}
pthread_mutex_unlock(&tmp->mutex);
- /*
- Release the protection against the global read lock and wake
- everyone, who might want to set a global read lock.
- */
- start_waiting_global_read_lock(thd);
thd->proc_info="got old table";
if (tmp->thd.killed)
{
@@ -1499,11 +1548,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
err1:
thd->fatal_error();
- /*
- Release the protection against the global read lock and wake
- everyone, who might want to set a global read lock.
- */
- start_waiting_global_read_lock(thd);
err:
pthread_mutex_unlock(&LOCK_delayed_create);
DBUG_RETURN(0); // Continue with normal insert
@@ -1524,6 +1568,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
TABLE *copy;
TABLE_SHARE *share= table->s;
byte *bitmap;
+ DBUG_ENTER("delayed_insert::get_local_table");
/* First request insert thread to get a lock */
status=1;
@@ -1547,6 +1592,13 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
}
}
+ /*
+ Allocate memory for the TABLE object, the field pointers array, and
+ one record buffer of reclength size. Normally a table has three
+ record buffers of rec_buff_length size, which includes alignment
+ bytes. Since the table copy is used for creating one record only,
+ the other record buffers and alignment are unnecessary.
+ */
client_thd->proc_info="allocating local table";
copy= (TABLE*) client_thd->alloc(sizeof(*copy)+
(share->fields+1)*sizeof(Field**)+
@@ -1554,23 +1606,28 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
share->column_bitmap_size*2);
if (!copy)
goto error;
- *copy= *table;
+ /* Copy the TABLE object. */
+ *copy= *table;
/* We don't need to change the file handler here */
- field= copy->field= (Field**) (copy+1);
- bitmap= (byte*) (field+share->fields+1);
- copy->record[0]= (bitmap+ share->column_bitmap_size*2);
- memcpy((char*) copy->record[0],(char*) table->record[0],share->reclength);
-
- /* Make a copy of all fields */
-
- adjust_ptrs=PTR_BYTE_DIFF(copy->record[0],table->record[0]);
-
- found_next_number_field=table->found_next_number_field;
- for (org_field=table->field ; *org_field ; org_field++,field++)
+ /* Assign the pointers for the field pointers array and the record. */
+ field= copy->field= (Field**) (copy + 1);
+ bitmap= (byte*) (field + share->fields + 1);
+ copy->record[0]= (bitmap + share->column_bitmap_size * 2);
+ memcpy((char*) copy->record[0], (char*) table->record[0], share->reclength);
+ /*
+ Make a copy of all fields.
+ The copied fields need to point into the copied record. This is done
+ by copying the field objects with their old pointer values and then
+ "move" the pointers by the distance between the original and copied
+ records. That way we preserve the relative positions in the records.
+ */
+ adjust_ptrs= PTR_BYTE_DIFF(copy->record[0], table->record[0]);
+ found_next_number_field= table->found_next_number_field;
+ for (org_field= table->field; *org_field; org_field++, field++)
{
- if (!(*field= (*org_field)->new_field(client_thd->mem_root,copy)))
- return 0;
+ if (!(*field= (*org_field)->new_field(client_thd->mem_root, copy, 1)))
+ DBUG_RETURN(0);
(*field)->orig_table= copy; // Remove connection
(*field)->move_field_offset(adjust_ptrs); // Point at copy->record[0]
if (*org_field == found_next_number_field)
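The pointer-rebasing trick described in the comment above can be shown in isolation with a small, self-contained hedged sketch (MiniField and rebase_example() are made up; PTR_BYTE_DIFF and move_field_offset do the analogous work in the server):

    #include <cstring>
    #include <cstddef>

    /* A "field" holds a pointer into a record buffer. After copying the
       buffer, every pointer is shifted by the distance between the buffers,
       preserving each field's relative position in the record. */
    struct MiniField { char *ptr; };

    static void rebase_example()
    {
      char rec[16]= "abcdefghijklmno";
      char copy_rec[16];
      std::memcpy(copy_rec, rec, sizeof(rec));

      MiniField f= { rec + 5 };                 /* points into the original  */
      std::ptrdiff_t adjust= copy_rec - rec;    /* cf. PTR_BYTE_DIFF         */
      MiniField f_copy= f;                      /* copy with the old pointer */
      f_copy.ptr+= adjust;                      /* cf. move_field_offset     */
      /* f_copy.ptr now addresses offset 5 inside copy_rec. */
    }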
@@ -1603,26 +1660,27 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
copy->read_set= &copy->def_read_set;
copy->write_set= &copy->def_write_set;
- return copy;
+ DBUG_RETURN(copy);
/* Got fatal error */
error:
tables_in_use--;
status=1;
pthread_cond_signal(&cond); // Inform thread about abort
- return 0;
+ DBUG_RETURN(0);
}
/* Put a question in queue */
-static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic,
- bool ignore, char *query, uint query_length,
- bool log_on)
+static int
+write_delayed(THD *thd,TABLE *table, enum_duplicates duplic,
+ LEX_STRING query, bool ignore, bool log_on)
{
- delayed_row *row=0;
+ delayed_row *row;
delayed_insert *di=thd->di;
DBUG_ENTER("write_delayed");
+ DBUG_PRINT("enter", ("query = '%s' length %u", query.str, query.length));
thd->proc_info="waiting for handler insert";
pthread_mutex_lock(&di->mutex);
@@ -1630,18 +1688,44 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic,
pthread_cond_wait(&di->cond_client,&di->mutex);
thd->proc_info="storing row into queue";
- if (thd->killed || !(row= new delayed_row(duplic, ignore, log_on)))
+ if (thd->killed)
goto err;
+ /*
+ Take a copy of the query string, if there is any. The string will
+ be free'ed when the row is destroyed. If there is no query string,
+ we don't do anything special.
+ */
+
+ if (query.str)
+ {
+ char *str;
+ if (!(str= my_strndup(query.str, query.length, MYF(MY_WME))))
+ goto err;
+ query.str= str;
+ }
+ row= new delayed_row(query, duplic, ignore, log_on);
+ if (row == NULL)
+ {
+ my_free(query.str, MYF(MY_WME));
+ goto err;
+ }
+
if (!(row->record= (char*) my_malloc(table->s->reclength, MYF(MY_WME))))
goto err;
memcpy(row->record, table->record[0], table->s->reclength);
- di->set_query(query, query_length);
row->start_time= thd->start_time;
row->query_start_used= thd->query_start_used;
- row->last_insert_id_used= thd->last_insert_id_used;
- row->insert_id_used= thd->insert_id_used;
- row->last_insert_id= thd->last_insert_id;
+ /*
+ These are for the binlog: LAST_INSERT_ID() has been evaluated at this
+ time, so the record does not need it, but statement-based binlogging of
+ the INSERT will need it when the row is actually inserted.
+ As for SET INSERT_ID, DELAYED does not honour it (BUG#20830).
+ */
+ row->stmt_depends_on_first_successful_insert_id_in_prev_stmt=
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt;
+ row->first_successful_insert_id_in_prev_stmt=
+ thd->first_successful_insert_id_in_prev_stmt;
row->timestamp_field_type= table->timestamp_field_type;
di->rows.push_back(row);
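The ownership hand-off for the query text is worth spelling out (a hedged summary of what the code above does): write_delayed() takes a heap copy of the statement, the row object owns it from then on, and ~delayed_row() releases it with x_free(query.str).

    /* Sketch of the lifetime, mirroring the code in this hunk. */
    LEX_STRING q;
    q.str= my_strndup(thd->query, thd->query_length, MYF(MY_WME)); /* heap copy */
    q.length= thd->query_length;
    delayed_row *r= new delayed_row(q, duplic, ignore, log_on);    /* row owns  */
    di->rows.push_back(r);               /* DI thread consumes and deletes the  */
                                         /* row, which frees the query string.  */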
@@ -1895,6 +1979,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
MYSQL_LOCK *lock=thd->lock;
thd->lock=0;
pthread_mutex_unlock(&di->mutex);
+ di->table->file->ha_release_auto_increment();
mysql_unlock_tables(thd, lock);
di->group_count=0;
pthread_mutex_lock(&di->mutex);
@@ -1970,9 +2055,8 @@ bool delayed_insert::handle_inserts(void)
{
int error;
ulong max_rows;
- bool using_ignore=0,
- using_bin_log= mysql_bin_log.is_open();
-
+ bool using_ignore= 0, using_opt_replace= 0,
+ using_bin_log= mysql_bin_log.is_open();
delayed_row *row;
DBUG_ENTER("handle_inserts");
@@ -1995,7 +2079,7 @@ bool delayed_insert::handle_inserts(void)
if (thd.killed || table->s->version != refresh_version)
{
thd.killed= THD::KILL_CONNECTION;
- max_rows= ~(ulong)0; // Do as much as possible
+ max_rows= ULONG_MAX; // Do as much as possible
}
/*
@@ -2007,13 +2091,6 @@ bool delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_WRITE_CACHE);
pthread_mutex_lock(&mutex);
- /* Reset auto-increment cacheing */
- if (thd.clear_next_insert_id)
- {
- thd.next_insert_id= 0;
- thd.clear_next_insert_id= 0;
- }
-
while ((row=rows.get()))
{
stacked_inserts--;
@@ -2022,9 +2099,12 @@ bool delayed_insert::handle_inserts(void)
thd.start_time=row->start_time;
thd.query_start_used=row->query_start_used;
- thd.last_insert_id=row->last_insert_id;
- thd.last_insert_id_used=row->last_insert_id_used;
- thd.insert_id_used=row->insert_id_used;
+ /* for the binlog, forget auto_increment ids generated by previous rows */
+// thd.auto_inc_intervals_in_cur_stmt_for_binlog.empty();
+ thd.first_successful_insert_id_in_prev_stmt=
+ row->first_successful_insert_id_in_prev_stmt;
+ thd.stmt_depends_on_first_successful_insert_id_in_prev_stmt=
+ row->stmt_depends_on_first_successful_insert_id_in_prev_stmt;
table->timestamp_field_type= row->timestamp_field_type;
info.ignore= row->ignore;
@@ -2035,6 +2115,13 @@ bool delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
using_ignore=1;
}
+ if (info.handle_duplicates == DUP_REPLACE &&
+ (!table->triggers ||
+ !table->triggers->has_delete_triggers()))
+ {
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
+ using_opt_replace= 1;
+ }
thd.clear_error(); // reset error for binlog
if (write_record(&thd, table, &info))
{
@@ -2042,11 +2129,33 @@ bool delayed_insert::handle_inserts(void)
thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status);
row->log_query = 0;
}
+
if (using_ignore)
{
using_ignore=0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
}
+ if (using_opt_replace)
+ {
+ using_opt_replace= 0;
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
+ }
+
+ if (row->log_query && row->query.str != NULL && mysql_bin_log.is_open())
+ {
+ /*
+ If the query has several rows to insert, only the first row will come
+ here. In row-based binlogging, this means that the first row will be
+ written to binlog as one Table_map event and one Rows event (due to an
+ event flush done in binlog_query()), then all other rows of this query
+ will be binlogged together as one single Table_map event and one
+ single Rows event.
+ */
+ thd.binlog_query(THD::ROW_QUERY_TYPE,
+ row->query.str, row->query.length,
+ FALSE, FALSE);
+ }
+
if (table->s->blob_fields)
free_delayed_insert_blobs(table);
thread_safe_sub(delayed_rows_in_use,1,&LOCK_delayed_status);
@@ -2093,13 +2202,25 @@ bool delayed_insert::handle_inserts(void)
pthread_cond_broadcast(&cond_client); // If waiting clients
}
}
-
thd.proc_info=0;
pthread_mutex_unlock(&mutex);
- /* After releasing the mutex, to prevent deadlocks. */
- if (mysql_bin_log.is_open())
- thd.binlog_query(THD::ROW_QUERY_TYPE, query, query_length, FALSE, FALSE);
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ We need to flush the pending event when using row-based
+ replication since the flushing normally done in binlog_query() is
+ not done last in the statement: for delayed inserts, the insert
+ statement is logged *before* all rows are inserted.
+
+ We can flush the pending event without checking the thd->lock
+ since the delayed insert *thread* is not inside a stored function
+ or trigger.
+
+ TODO: Move the logging to last in the sequence of rows.
+ */
+ if (thd.current_stmt_binlog_row_based)
+ thd.binlog_flush_pending_rows_event(TRUE);
+#endif /* HAVE_ROW_BASED_REPLICATION */
if ((error=table->file->extra(HA_EXTRA_NO_CACHE)))
{ // This shouldn't happen
@@ -2187,7 +2308,7 @@ select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par,
enum_duplicates duplic,
bool ignore_check_option_errors)
:table_list(table_list_par), table(table_par), fields(fields_par),
- last_insert_id(0),
+ autoinc_value_of_last_inserted_row(0),
insert_into_view(table_list_par && table_list_par->view != 0)
{
bzero((char*) &info,sizeof(info));
@@ -2292,6 +2413,9 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ if (info.handle_duplicates == DUP_REPLACE &&
+ (!table->triggers || !table->triggers->has_delete_triggers()))
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
thd->no_trans_update= 0;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
@@ -2301,6 +2425,10 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
check_that_all_fields_are_given_values(thd, table, table_list)) ||
table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd));
+
+ if (!res)
+ table->mark_columns_needed_for_insert();
+
DBUG_RETURN(res);
}
@@ -2396,15 +2524,20 @@ bool select_insert::send_data(List<Item> &values)
if (table->next_number_field)
{
/*
+ If no value has been autogenerated so far, we need to remember the
+ value we just saw; we may need to send it to the client in the end.
+ */
+ if (thd->first_successful_insert_id_in_cur_stmt == 0) // optimization
+ autoinc_value_of_last_inserted_row=
+ table->next_number_field->val_int();
+ /*
Clear auto-increment field for the next record, if triggers are used
we will clear it twice, but this should be cheap.
*/
table->next_number_field->reset();
- if (!last_insert_id && thd->insert_id_used)
- last_insert_id= thd->insert_id();
}
}
- table->file->release_auto_increment();
+ table->file->ha_release_auto_increment();
DBUG_RETURN(error);
}
@@ -2466,8 +2599,6 @@ void select_insert::send_error(uint errcode,const char *err)
{
if (!table->file->has_transactions())
{
- if (last_insert_id)
- thd->insert_id(last_insert_id); // For binary log
if (mysql_bin_log.is_open())
{
thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query, thd->query_length,
@@ -2487,10 +2618,12 @@ void select_insert::send_error(uint errcode,const char *err)
bool select_insert::send_eof()
{
int error,error2;
+ ulonglong id;
DBUG_ENTER("select_insert::send_eof");
error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert():0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
if (info.copied || info.deleted || info.updated)
{
@@ -2512,8 +2645,6 @@ bool select_insert::send_eof()
thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
}
- if (last_insert_id)
- thd->insert_id(last_insert_id); // For binary log
/*
Write to the binlog before committing the transaction. No statement will
be written by the binlog_query() below in RBR mode. All the
@@ -2543,7 +2674,13 @@ bool select_insert::send_eof()
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
(ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields);
thd->row_count_func= info.copied+info.deleted+info.updated;
- ::send_ok(thd, (ulong) thd->row_count_func, last_insert_id, buff);
+
+ id= (thd->first_successful_insert_id_in_cur_stmt > 0) ?
+ thd->first_successful_insert_id_in_cur_stmt :
+ (thd->arg_of_last_insert_id_function ?
+ thd->first_successful_insert_id_in_prev_stmt :
+ (info.copied ? autoinc_value_of_last_inserted_row : 0));
+ ::send_ok(thd, (ulong) thd->row_count_func, id, buff);
DBUG_RETURN(0);
}
@@ -2709,21 +2846,6 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
}
-class MY_HOOKS : public TABLEOP_HOOKS
-{
-public:
- MY_HOOKS(select_create *x) : ptr(x) { }
- virtual void do_prelock(TABLE **tables, uint count)
- {
- if (ptr->get_thd()->current_stmt_binlog_row_based)
- ptr->binlog_show_create_table(tables, count);
- }
-
-private:
- select_create *ptr;
-};
-
-
int
select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
{
@@ -2736,8 +2858,9 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
MY_HOOKS(select_create *x) : ptr(x) { }
virtual void do_prelock(TABLE **tables, uint count)
{
- if (ptr->get_thd()->current_stmt_binlog_row_based)
- ptr->binlog_show_create_table(tables, count);
+ if (ptr->get_thd()->current_stmt_binlog_row_based &&
+ !(ptr->get_create_info()->options & HA_LEX_CREATE_TMP_TABLE))
+ ptr->binlog_show_create_table(tables, count);
}
private:
@@ -2775,6 +2898,9 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ if (info.handle_duplicates == DUP_REPLACE &&
+ (!table->triggers || !table->triggers->has_delete_triggers()))
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
if (!thd->prelocked_mode)
table->file->ha_start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
@@ -2782,8 +2908,10 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
(thd->variables.sql_mode &
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
- DBUG_RETURN(check_that_all_fields_are_given_values(thd, table,
- table_list));
+ if (check_that_all_fields_are_given_values(thd, table, table_list))
+ DBUG_RETURN(1);
+ table->mark_columns_needed_for_insert();
+ DBUG_RETURN(0);
}
@@ -2857,12 +2985,13 @@ bool select_create::send_eof()
else
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
VOID(pthread_mutex_lock(&LOCK_open));
mysql_unlock_tables(thd, thd->extra_lock);
if (!table->s->tmp_table)
{
if (close_thread_table(thd, &table))
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
thd->extra_lock=0;
table=0;
@@ -2882,6 +3011,7 @@ void select_create::abort()
if (table)
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
handlerton *table_type=table->s->db_type;
if (!table->s->tmp_table)
{
@@ -2892,7 +3022,7 @@ void select_create::abort()
quick_rm_table(table_type, create_table->db, create_table->table_name);
/* Tell threads waiting for refresh that something has happened */
if (version != refresh_version)
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
else if (!create_info->table_existed)
close_temporary_table(thd, table, 1, 1);
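Note on the fields introduced above (first_successful_insert_id_in_cur_stmt/prev_stmt and autoinc_value_of_last_inserted_row): they implement the documented rule that a multi-row INSERT reports the auto-increment id of its first inserted row. A minimal client-side sketch of that rule, not part of the patch; it assumes an open connection and a hypothetical table t1(id INT AUTO_INCREMENT PRIMARY KEY, v INT):

  #include <mysql.h>

  static void first_id_demo(MYSQL *mysql)
  {
    /* multi-row insert: three AUTO_INCREMENT values are generated */
    mysql_query(mysql, "INSERT INTO t1 (v) VALUES (1),(2),(3)");
    /* the C API and LAST_INSERT_ID() both report the id of the FIRST row */
    my_ulonglong first_id= mysql_insert_id(mysql);
    (void) first_id;  /* e.g. 1 on an empty table, although 3 rows went in */
  }

That first-row id is also the value select_insert::send_eof() above now prefers when choosing what to pass to send_ok().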
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index f6031a1f2fd..fe36b578f3b 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -42,8 +42,6 @@ sys_var *trg_new_row_fake_var= (sys_var*) 0x01;
#define yySkip() lex->ptr++
#define yyLength() ((uint) (lex->ptr - lex->tok_start)-1)
-pthread_key(LEX*,THR_LEX);
-
/* Longest standard keyword name */
#define TOCK_NAME_LENGTH 24
@@ -92,8 +90,6 @@ void lex_init(void)
for (i=0 ; i < array_elements(sql_functions) ; i++)
sql_functions[i].length=(uchar) strlen(sql_functions[i].name);
- VOID(pthread_key_create(&THR_LEX,NULL));
-
DBUG_VOID_RETURN;
}
@@ -141,6 +137,7 @@ void lex_start(THD *thd, const uchar *buf, uint length)
lex->select_lex.link_next= lex->select_lex.slave= lex->select_lex.next= 0;
lex->select_lex.link_prev= (st_select_lex_node**)&(lex->all_selects_list);
lex->select_lex.options= 0;
+ lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
lex->select_lex.init_order();
lex->select_lex.group_list.empty();
lex->describe= 0;
@@ -183,7 +180,6 @@ void lex_start(THD *thd, const uchar *buf, uint length)
lex->nest_level=0 ;
lex->allow_sum_func= 0;
lex->in_sum_func= NULL;
- lex->binlog_row_based_if_mixed= 0;
DBUG_VOID_RETURN;
}
@@ -1071,6 +1067,7 @@ int MYSQLlex(void *arg, void *yythd)
void st_select_lex_node::init_query()
{
options= 0;
+ sql_cache= SQL_CACHE_UNSPECIFIED;
linkage= UNSPECIFIED_TYPE;
no_error= no_table_names_allowed= 0;
uncacheable= 0;
@@ -1147,6 +1144,7 @@ void st_select_lex::init_select()
table_join_options= 0;
in_sum_expr= with_wild= 0;
options= 0;
+ sql_cache= SQL_CACHE_UNSPECIFIED;
braces= 0;
when_list.empty();
expr_list.empty();
@@ -1625,6 +1623,9 @@ void Query_tables_list::reset_query_tables_list(bool init)
sroutines_list.empty();
sroutines_list_own_last= sroutines_list.next;
sroutines_list_own_elements= 0;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ binlog_row_based_if_mixed= FALSE;
+#endif
}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index a46aaa0bab7..f42f70d4397 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -338,6 +338,14 @@ protected:
public:
ulonglong options;
+
+ /*
+ In sql_cache we store the SQL_CACHE flag as specified by the user, to be
+ able to restore the SELECT statement from internal structures.
+ */
+ enum e_sql_cache { SQL_CACHE_UNSPECIFIED, SQL_NO_CACHE, SQL_CACHE };
+ e_sql_cache sql_cache;
+
/*
result of this query can't be cached, bit field, can be :
UNCACHEABLE_DEPENDENT
@@ -793,6 +801,16 @@ public:
byte **sroutines_list_own_last;
uint sroutines_list_own_elements;
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ Tells if the parsing stage detected that some items require row-based
+ binlogging to give a reliable binlog/replication, or if we will use
+ stored functions or triggers which themselves require row-based
+ binlogging.
+ */
+ bool binlog_row_based_if_mixed;
+#endif
+
/*
These constructor and destructor serve for creation/destruction
of Query_tables_list instances which are used as backup storage.
@@ -808,6 +826,11 @@ public:
*this= *state;
}
+ /*
+ Direct addition to the list of query tables.
+ If you are using this function, you must ensure that the table
+ object, in particular the table->db member, is initialized.
+ */
void add_to_query_tables(TABLE_LIST *table)
{
*(table->prev_global= query_tables_last)= table;
@@ -914,7 +937,7 @@ typedef struct st_lex : public Query_tables_list
List<Name_resolution_context> context_stack;
List<LEX_STRING> db_list;
- SQL_LIST proc_list, auxilliary_table_list, save_list;
+ SQL_LIST proc_list, auxiliary_table_list, save_list;
create_field *last_field;
Item_sum *in_sum_func;
udf_func udf;
@@ -970,11 +993,7 @@ typedef struct st_lex : public Query_tables_list
uint8 create_view_check;
bool drop_if_exists, drop_temporary, local_file, one_shot_set;
bool in_comment, ignore_space, verbose, no_write_to_binlog;
- /*
- binlog_row_based_if_mixed tells if the parsing stage detected that some
- items require row-based binlogging to give a reliable binlog/replication.
- */
- bool tx_chain, tx_release, binlog_row_based_if_mixed;
+ bool tx_chain, tx_release;
/*
Special JOIN::prepare mode: changing of query is prohibited.
When creating a view, we need to just check its syntax omitting
@@ -1172,8 +1191,4 @@ extern void lex_start(THD *thd, const uchar *buf, uint length);
extern void lex_end(LEX *lex);
extern int MYSQLlex(void *arg, void *yythd);
-extern pthread_key(LEX*,THR_LEX);
-
-#define current_lex (current_thd->lex)
-
-#endif
+#endif /* MYSQL_SERVER */
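For reference, the three values of the new sql_cache member map onto the cache modifiers a user can write in a SELECT. A small sketch, not part of the patch; the helper name and its parameters are hypothetical, only the member and enum names come from the header above:

  /* SELECT * FROM t1              -> SQL_CACHE_UNSPECIFIED (no modifier) */
  /* SELECT SQL_CACHE * FROM t1    -> SQL_CACHE                           */
  /* SELECT SQL_NO_CACHE * FROM t1 -> SQL_NO_CACHE                        */
  static void record_cache_modifier(st_select_lex *sel,
                                    bool saw_sql_cache, bool saw_sql_no_cache)
  {
    sel->sql_cache= saw_sql_no_cache ? SELECT_LEX::SQL_NO_CACHE :
                    saw_sql_cache    ? SELECT_LEX::SQL_CACHE :
                                       SELECT_LEX::SQL_CACHE_UNSPECIFIED;
  }

Keeping the flag separate from options is what lets the SELECT be reconstructed with exactly the modifier the user originally gave, as the comment above notes.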
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index f8debbedc62..25cb7ff4c1e 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -187,9 +187,6 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
table= table_list->table;
transactional_table= table->file->has_transactions();
- if (table->found_next_number_field)
- table->mark_auto_increment_column();
-
if (!fields_vars.elements)
{
Field **field;
@@ -232,6 +229,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
+ table->mark_columns_needed_for_insert();
+
uint tot_length=0;
bool use_blobs= 0, use_vars= 0;
List_iterator_fast<Item> it(fields_vars);
@@ -362,6 +361,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (ignore ||
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ if (handle_duplicates == DUP_REPLACE &&
+ (!table->triggers ||
+ !table->triggers->has_delete_triggers()))
+ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
if (!thd->prelocked_mode)
table->file->ha_start_bulk_insert((ha_rows) 0);
table->copy_blobs=1;
@@ -386,6 +389,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
error= 1;
}
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
table->next_number_field=0;
}
ha_enable_transaction(thd, TRUE);
@@ -497,13 +501,12 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
error=ha_autocommit_or_rollback(thd,error);
err:
+ table->file->ha_release_auto_increment();
if (thd->lock)
{
mysql_unlock_tables(thd, thd->lock);
thd->lock=0;
}
- if (table != NULL)
- table->file->release_auto_increment();
thd->abort_on_warning= 0;
DBUG_RETURN(error);
}
@@ -639,14 +642,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
thd->no_trans_update= no_trans_update;
/*
- If auto_increment values are used, save the first one
- for LAST_INSERT_ID() and for the binary/update log.
- We can't use insert_id() as we don't want to touch the
- last_insert_id_used flag.
- */
- if (!id && thd->insert_id_used)
- id= thd->last_insert_id;
- /*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
*/
@@ -662,8 +657,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
thd->row_count++;
continue_loop:;
}
- if (id && !read_info.error)
- thd->insert_id(id); // For binary/update log
DBUG_RETURN(test(read_info.error));
}
@@ -807,14 +800,6 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (write_record(thd, table, &info))
DBUG_RETURN(1);
/*
- If auto_increment values are used, save the first one
- for LAST_INSERT_ID() and for the binary/update log.
- We can't use insert_id() as we don't want to touch the
- last_insert_id_used flag.
- */
- if (!id && thd->insert_id_used)
- id= thd->last_insert_id;
- /*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
*/
@@ -833,8 +818,6 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
thd->row_count++;
continue_loop:;
}
- if (id && !read_info.error)
- thd->insert_id(id); // For binary/update log
DBUG_RETURN(test(read_info.error));
}
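The HA_EXTRA_WRITE_CAN_REPLACE / HA_EXTRA_WRITE_CANNOT_REPLACE pair added here follows the same guard as the call sites in sql_insert.cc above: the handler may only be told to overwrite duplicates in place when no DELETE triggers exist, because a trigger must observe a real delete of the old row. A hypothetical helper, not in the patch, capturing the repeated condition:

  static bool can_push_down_replace(TABLE *table, enum_duplicates dup)
  {
    /* the same test made before every HA_EXTRA_WRITE_CAN_REPLACE call above */
    return dup == DUP_REPLACE &&
           (!table->triggers || !table->triggers->has_delete_triggers());
  }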
diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc
new file mode 100644
index 00000000000..9dae55e4508
--- /dev/null
+++ b/sql/sql_locale.cc
@@ -0,0 +1,1607 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ The beginnings of locale(7) support.
+ Sponsored for subset of LC_TIME support, WorkLog entry 2928, -- Josh Chamas
+
+ !! This file is built from my_locale.pl !!
+*/
+
+#include "mysql_priv.h"
+
+
+MY_LOCALE *my_locale_by_name(const char *name)
+{
+ MY_LOCALE **locale;
+ for( locale= my_locales; *locale != NULL; locale++)
+ {
+ if(!strcmp((*locale)->name, name))
+ return *locale;
+ }
+ return NULL;
+}
+
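A short usage sketch for the lookup above, not part of the generated file; the MY_LOCALE member names are assumptions inferred from the order of the initializers below, and my_locale_en_US is the definition that appears later in this file:

  static const char *first_month_name(const char *locale_name)
  {
    MY_LOCALE *loc= my_locale_by_name(locale_name);
    if (loc == NULL)
      loc= &my_locale_en_US;                  /* fall back to the default */
    /* month_names is a TYPELIB*, so the strings sit in type_names[] */
    return loc->month_names->type_names[0];   /* "January" for en_US */
  }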
+/***** LOCALE BEGIN ar_AE: Arabic - United Arab Emirates *****/
+static const char *my_locale_month_names_ar_AE[13] =
+ {"يناير","Ùبراير","مارس","أبريل","مايو","يونيو","يوليو","أغسطس","سبتمبر","أكتوبر","نوÙمبر","ديسمبر", NullS };
+static const char *my_locale_ab_month_names_ar_AE[13] =
+ {"ينا","Ùبر","مار","أبر","ماي","يون","يول","أغس","سبت","أكت","نوÙ","ديس", NullS };
+static const char *my_locale_day_names_ar_AE[8] =
+ {"الاثنين","الثلاثاء","الأربعاء","الخميس","الجمعة","السبت ","الأحد", NullS };
+static const char *my_locale_ab_day_names_ar_AE[8] =
+ {"ن","ث","ر","خ","ج","س","ح", NullS };
+static TYPELIB my_locale_typelib_month_names_ar_AE =
+ { array_elements(my_locale_month_names_ar_AE)-1, "", my_locale_month_names_ar_AE, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ar_AE =
+ { array_elements(my_locale_ab_month_names_ar_AE)-1, "", my_locale_ab_month_names_ar_AE, NULL };
+static TYPELIB my_locale_typelib_day_names_ar_AE =
+ { array_elements(my_locale_day_names_ar_AE)-1, "", my_locale_day_names_ar_AE, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ar_AE =
+ { array_elements(my_locale_ab_day_names_ar_AE)-1, "", my_locale_ab_day_names_ar_AE, NULL };
+MY_LOCALE my_locale_ar_AE=
+ { "ar_AE", "Arabic - United Arab Emirates", FALSE, &my_locale_typelib_month_names_ar_AE, &my_locale_typelib_ab_month_names_ar_AE, &my_locale_typelib_day_names_ar_AE, &my_locale_typelib_ab_day_names_ar_AE };
+/***** LOCALE END ar_AE *****/
+
+/***** LOCALE BEGIN ar_BH: Arabic - Bahrain *****/
+static const char *my_locale_month_names_ar_BH[13] =
+ {"يناير","Ùبراير","مارس","أبريل","مايو","يونيو","يوليو","أغسطس","سبتمبر","أكتوبر","نوÙمبر","ديسمبر", NullS };
+static const char *my_locale_ab_month_names_ar_BH[13] =
+ {"ينا","Ùبر","مار","أبر","ماي","يون","يول","أغس","سبت","أكت","نوÙ","ديس", NullS };
+static const char *my_locale_day_names_ar_BH[8] =
+ {"الاثنين","الثلاثاء","الأربعاء","الخميس","الجمعة","السبت","الأحد", NullS };
+static const char *my_locale_ab_day_names_ar_BH[8] =
+ {"ن","ث","ر","خ","ج","س","ح", NullS };
+static TYPELIB my_locale_typelib_month_names_ar_BH =
+ { array_elements(my_locale_month_names_ar_BH)-1, "", my_locale_month_names_ar_BH, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ar_BH =
+ { array_elements(my_locale_ab_month_names_ar_BH)-1, "", my_locale_ab_month_names_ar_BH, NULL };
+static TYPELIB my_locale_typelib_day_names_ar_BH =
+ { array_elements(my_locale_day_names_ar_BH)-1, "", my_locale_day_names_ar_BH, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ar_BH =
+ { array_elements(my_locale_ab_day_names_ar_BH)-1, "", my_locale_ab_day_names_ar_BH, NULL };
+MY_LOCALE my_locale_ar_BH=
+ { "ar_BH", "Arabic - Bahrain", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_BH *****/
+
+/***** LOCALE BEGIN ar_JO: Arabic - Jordan *****/
+static const char *my_locale_month_names_ar_JO[13] =
+ {"كانون الثاني","شباط","آذار","نيسان","نوار","حزيران","تموز","آب","أيلول","تشرين الأول","تشرين الثاني","كانون الأول", NullS };
+static const char *my_locale_ab_month_names_ar_JO[13] =
+ {"كانون الثاني","شباط","آذار","نيسان","نوار","حزيران","تموز","آب","أيلول","تشرين الأول","تشرين الثاني","كانون الأول", NullS };
+static const char *my_locale_day_names_ar_JO[8] =
+ {"الاثنين","الثلاثاء","الأربعاء","الخميس","الجمعة","السبت","الأحد", NullS };
+static const char *my_locale_ab_day_names_ar_JO[8] =
+ {"الاثنين","الثلاثاء","الأربعاء","الخميس","الجمعة","السبت","الأحد", NullS };
+static TYPELIB my_locale_typelib_month_names_ar_JO =
+ { array_elements(my_locale_month_names_ar_JO)-1, "", my_locale_month_names_ar_JO, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ar_JO =
+ { array_elements(my_locale_ab_month_names_ar_JO)-1, "", my_locale_ab_month_names_ar_JO, NULL };
+static TYPELIB my_locale_typelib_day_names_ar_JO =
+ { array_elements(my_locale_day_names_ar_JO)-1, "", my_locale_day_names_ar_JO, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ar_JO =
+ { array_elements(my_locale_ab_day_names_ar_JO)-1, "", my_locale_ab_day_names_ar_JO, NULL };
+MY_LOCALE my_locale_ar_JO=
+ { "ar_JO", "Arabic - Jordan", FALSE, &my_locale_typelib_month_names_ar_JO, &my_locale_typelib_ab_month_names_ar_JO, &my_locale_typelib_day_names_ar_JO, &my_locale_typelib_ab_day_names_ar_JO };
+/***** LOCALE END ar_JO *****/
+
+/***** LOCALE BEGIN ar_SA: Arabic - Saudi Arabia *****/
+static const char *my_locale_month_names_ar_SA[13] =
+ {"كانون الثاني","شباط","آذار","نيسـان","أيار","حزيران","تـمـوز","آب","أيلول","تشرين الأول","تشرين الثاني","كانون الأول", NullS };
+static const char *my_locale_ab_month_names_ar_SA[13] =
+ {"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec", NullS };
+static const char *my_locale_day_names_ar_SA[8] =
+ {"الإثنين","الثلاثاء","الأربعاء","الخميس","الجمعـة","السبت","الأحد", NullS };
+static const char *my_locale_ab_day_names_ar_SA[8] =
+ {"Mon","Tue","Wed","Thu","Fri","Sat","Sun", NullS };
+static TYPELIB my_locale_typelib_month_names_ar_SA =
+ { array_elements(my_locale_month_names_ar_SA)-1, "", my_locale_month_names_ar_SA, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ar_SA =
+ { array_elements(my_locale_ab_month_names_ar_SA)-1, "", my_locale_ab_month_names_ar_SA, NULL };
+static TYPELIB my_locale_typelib_day_names_ar_SA =
+ { array_elements(my_locale_day_names_ar_SA)-1, "", my_locale_day_names_ar_SA, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ar_SA =
+ { array_elements(my_locale_ab_day_names_ar_SA)-1, "", my_locale_ab_day_names_ar_SA, NULL };
+MY_LOCALE my_locale_ar_SA=
+ { "ar_SA", "Arabic - Saudi Arabia", FALSE, &my_locale_typelib_month_names_ar_SA, &my_locale_typelib_ab_month_names_ar_SA, &my_locale_typelib_day_names_ar_SA, &my_locale_typelib_ab_day_names_ar_SA };
+/***** LOCALE END ar_SA *****/
+
+/***** LOCALE BEGIN ar_SY: Arabic - Syria *****/
+static const char *my_locale_month_names_ar_SY[13] =
+ {"كانون الثاني","شباط","آذار","نيسان","نواران","حزير","تموز","آب","أيلول","تشرين الأول","تشرين الثاني","كانون الأول", NullS };
+static const char *my_locale_ab_month_names_ar_SY[13] =
+ {"كانون الثاني","شباط","آذار","نيسان","نوار","حزيران","تموز","آب","أيلول","تشرين الأول","تشرين الثاني","كانون الأول", NullS };
+static const char *my_locale_day_names_ar_SY[8] =
+ {"الاثنين","الثلاثاء","الأربعاء","الخميس","الجمعة","السبت","الأحد", NullS };
+static const char *my_locale_ab_day_names_ar_SY[8] =
+ {"الاثنين","الثلاثاء","الأربعاء","الخميس","الجمعة","السبت","الأحد", NullS };
+static TYPELIB my_locale_typelib_month_names_ar_SY =
+ { array_elements(my_locale_month_names_ar_SY)-1, "", my_locale_month_names_ar_SY, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ar_SY =
+ { array_elements(my_locale_ab_month_names_ar_SY)-1, "", my_locale_ab_month_names_ar_SY, NULL };
+static TYPELIB my_locale_typelib_day_names_ar_SY =
+ { array_elements(my_locale_day_names_ar_SY)-1, "", my_locale_day_names_ar_SY, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ar_SY =
+ { array_elements(my_locale_ab_day_names_ar_SY)-1, "", my_locale_ab_day_names_ar_SY, NULL };
+MY_LOCALE my_locale_ar_SY=
+ { "ar_SY", "Arabic - Syria", FALSE, &my_locale_typelib_month_names_ar_SY, &my_locale_typelib_ab_month_names_ar_SY, &my_locale_typelib_day_names_ar_SY, &my_locale_typelib_ab_day_names_ar_SY };
+/***** LOCALE END ar_SY *****/
+
+/***** LOCALE BEGIN be_BY: Belarusian - Belarus *****/
+static const char *my_locale_month_names_be_BY[13] =
+ {"Студзень","Люты","Сакавік","КраÑавік","Травень","ЧÑрвень","Ліпень","Жнівень","ВераÑень","КаÑтрычнік","ЛіÑтапад","Снежань", NullS };
+static const char *my_locale_ab_month_names_be_BY[13] =
+ {"Стд","Лют","Сак","КрÑ","Тра","ЧÑÑ€","Ліп","Жнв","Ð’Ñ€Ñ","КÑÑ‚","ЛіÑ","Снж", NullS };
+static const char *my_locale_day_names_be_BY[8] =
+ {"ПанÑдзелак","Ðўторак","Серада","Чацвер","ПÑтніца","Субота","ÐÑдзелÑ", NullS };
+static const char *my_locale_ab_day_names_be_BY[8] =
+ {"Пан","Ðўт","Срд","Чцв","ПÑÑ‚","Суб","ÐÑд", NullS };
+static TYPELIB my_locale_typelib_month_names_be_BY =
+ { array_elements(my_locale_month_names_be_BY)-1, "", my_locale_month_names_be_BY, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_be_BY =
+ { array_elements(my_locale_ab_month_names_be_BY)-1, "", my_locale_ab_month_names_be_BY, NULL };
+static TYPELIB my_locale_typelib_day_names_be_BY =
+ { array_elements(my_locale_day_names_be_BY)-1, "", my_locale_day_names_be_BY, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_be_BY =
+ { array_elements(my_locale_ab_day_names_be_BY)-1, "", my_locale_ab_day_names_be_BY, NULL };
+MY_LOCALE my_locale_be_BY=
+ { "be_BY", "Belarusian - Belarus", FALSE, &my_locale_typelib_month_names_be_BY, &my_locale_typelib_ab_month_names_be_BY, &my_locale_typelib_day_names_be_BY, &my_locale_typelib_ab_day_names_be_BY };
+/***** LOCALE END be_BY *****/
+
+/***** LOCALE BEGIN bg_BG: Bulgarian - Bulgaria *****/
+static const char *my_locale_month_names_bg_BG[13] =
+ {"Ñнуари","февруари","март","април","май","юни","юли","авгуÑÑ‚","Ñептември","октомври","ноември","декември", NullS };
+static const char *my_locale_ab_month_names_bg_BG[13] =
+ {"Ñну","фев","мар","апр","май","юни","юли","авг","Ñеп","окт","ное","дек", NullS };
+static const char *my_locale_day_names_bg_BG[8] =
+ {"понеделник","вторник","ÑÑ€Ñда","четвъртък","петък","Ñъбота","неделÑ", NullS };
+static const char *my_locale_ab_day_names_bg_BG[8] =
+ {"пн","вт","ÑÑ€","чт","пт","Ñб","нд", NullS };
+static TYPELIB my_locale_typelib_month_names_bg_BG =
+ { array_elements(my_locale_month_names_bg_BG)-1, "", my_locale_month_names_bg_BG, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_bg_BG =
+ { array_elements(my_locale_ab_month_names_bg_BG)-1, "", my_locale_ab_month_names_bg_BG, NULL };
+static TYPELIB my_locale_typelib_day_names_bg_BG =
+ { array_elements(my_locale_day_names_bg_BG)-1, "", my_locale_day_names_bg_BG, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_bg_BG =
+ { array_elements(my_locale_ab_day_names_bg_BG)-1, "", my_locale_ab_day_names_bg_BG, NULL };
+MY_LOCALE my_locale_bg_BG=
+ { "bg_BG", "Bulgarian - Bulgaria", FALSE, &my_locale_typelib_month_names_bg_BG, &my_locale_typelib_ab_month_names_bg_BG, &my_locale_typelib_day_names_bg_BG, &my_locale_typelib_ab_day_names_bg_BG };
+/***** LOCALE END bg_BG *****/
+
+/***** LOCALE BEGIN ca_ES: Catalan - Catalan *****/
+static const char *my_locale_month_names_ca_ES[13] =
+ {"gener","febrer","març","abril","maig","juny","juliol","agost","setembre","octubre","novembre","desembre", NullS };
+static const char *my_locale_ab_month_names_ca_ES[13] =
+ {"gen","feb","mar","abr","mai","jun","jul","ago","set","oct","nov","des", NullS };
+static const char *my_locale_day_names_ca_ES[8] =
+ {"dilluns","dimarts","dimecres","dijous","divendres","dissabte","diumenge", NullS };
+static const char *my_locale_ab_day_names_ca_ES[8] =
+ {"dl","dt","dc","dj","dv","ds","dg", NullS };
+static TYPELIB my_locale_typelib_month_names_ca_ES =
+ { array_elements(my_locale_month_names_ca_ES)-1, "", my_locale_month_names_ca_ES, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ca_ES =
+ { array_elements(my_locale_ab_month_names_ca_ES)-1, "", my_locale_ab_month_names_ca_ES, NULL };
+static TYPELIB my_locale_typelib_day_names_ca_ES =
+ { array_elements(my_locale_day_names_ca_ES)-1, "", my_locale_day_names_ca_ES, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ca_ES =
+ { array_elements(my_locale_ab_day_names_ca_ES)-1, "", my_locale_ab_day_names_ca_ES, NULL };
+MY_LOCALE my_locale_ca_ES=
+ { "ca_ES", "Catalan - Catalan", FALSE, &my_locale_typelib_month_names_ca_ES, &my_locale_typelib_ab_month_names_ca_ES, &my_locale_typelib_day_names_ca_ES, &my_locale_typelib_ab_day_names_ca_ES };
+/***** LOCALE END ca_ES *****/
+
+/***** LOCALE BEGIN cs_CZ: Czech - Czech Republic *****/
+static const char *my_locale_month_names_cs_CZ[13] =
+ {"leden","únor","bÅ™ezen","duben","kvÄ›ten","Äerven","Äervenec","srpen","září","říjen","listopad","prosinec", NullS };
+static const char *my_locale_ab_month_names_cs_CZ[13] =
+ {"led","úno","bÅ™e","dub","kvÄ›","Äen","Äec","srp","zář","říj","lis","pro", NullS };
+static const char *my_locale_day_names_cs_CZ[8] =
+ {"Pondělí","Úterý","Středa","Čtvrtek","Pátek","Sobota","Neděle", NullS };
+static const char *my_locale_ab_day_names_cs_CZ[8] =
+ {"Po","Út","St","Čt","Pá","So","Ne", NullS };
+static TYPELIB my_locale_typelib_month_names_cs_CZ =
+ { array_elements(my_locale_month_names_cs_CZ)-1, "", my_locale_month_names_cs_CZ, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_cs_CZ =
+ { array_elements(my_locale_ab_month_names_cs_CZ)-1, "", my_locale_ab_month_names_cs_CZ, NULL };
+static TYPELIB my_locale_typelib_day_names_cs_CZ =
+ { array_elements(my_locale_day_names_cs_CZ)-1, "", my_locale_day_names_cs_CZ, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_cs_CZ =
+ { array_elements(my_locale_ab_day_names_cs_CZ)-1, "", my_locale_ab_day_names_cs_CZ, NULL };
+MY_LOCALE my_locale_cs_CZ=
+ { "cs_CZ", "Czech - Czech Republic", FALSE, &my_locale_typelib_month_names_cs_CZ, &my_locale_typelib_ab_month_names_cs_CZ, &my_locale_typelib_day_names_cs_CZ, &my_locale_typelib_ab_day_names_cs_CZ };
+/***** LOCALE END cs_CZ *****/
+
+/***** LOCALE BEGIN da_DK: Danish - Denmark *****/
+static const char *my_locale_month_names_da_DK[13] =
+ {"januar","februar","marts","april","maj","juni","juli","august","september","oktober","november","december", NullS };
+static const char *my_locale_ab_month_names_da_DK[13] =
+ {"jan","feb","mar","apr","maj","jun","jul","aug","sep","okt","nov","dec", NullS };
+static const char *my_locale_day_names_da_DK[8] =
+ {"mandag","tirsdag","onsdag","torsdag","fredag","lørdag","søndag", NullS };
+static const char *my_locale_ab_day_names_da_DK[8] =
+ {"man","tir","ons","tor","fre","lør","søn", NullS };
+static TYPELIB my_locale_typelib_month_names_da_DK =
+ { array_elements(my_locale_month_names_da_DK)-1, "", my_locale_month_names_da_DK, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_da_DK =
+ { array_elements(my_locale_ab_month_names_da_DK)-1, "", my_locale_ab_month_names_da_DK, NULL };
+static TYPELIB my_locale_typelib_day_names_da_DK =
+ { array_elements(my_locale_day_names_da_DK)-1, "", my_locale_day_names_da_DK, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_da_DK =
+ { array_elements(my_locale_ab_day_names_da_DK)-1, "", my_locale_ab_day_names_da_DK, NULL };
+MY_LOCALE my_locale_da_DK=
+ { "da_DK", "Danish - Denmark", FALSE, &my_locale_typelib_month_names_da_DK, &my_locale_typelib_ab_month_names_da_DK, &my_locale_typelib_day_names_da_DK, &my_locale_typelib_ab_day_names_da_DK };
+/***** LOCALE END da_DK *****/
+
+/***** LOCALE BEGIN de_AT: German - Austria *****/
+static const char *my_locale_month_names_de_AT[13] =
+ {"Jänner","Feber","März","April","Mai","Juni","Juli","August","September","Oktober","November","Dezember", NullS };
+static const char *my_locale_ab_month_names_de_AT[13] =
+ {"Jän","Feb","Mär","Apr","Mai","Jun","Jul","Aug","Sep","Okt","Nov","Dez", NullS };
+static const char *my_locale_day_names_de_AT[8] =
+ {"Montag","Dienstag","Mittwoch","Donnerstag","Freitag","Samstag","Sonntag", NullS };
+static const char *my_locale_ab_day_names_de_AT[8] =
+ {"Mon","Die","Mit","Don","Fre","Sam","Son", NullS };
+static TYPELIB my_locale_typelib_month_names_de_AT =
+ { array_elements(my_locale_month_names_de_AT)-1, "", my_locale_month_names_de_AT, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_de_AT =
+ { array_elements(my_locale_ab_month_names_de_AT)-1, "", my_locale_ab_month_names_de_AT, NULL };
+static TYPELIB my_locale_typelib_day_names_de_AT =
+ { array_elements(my_locale_day_names_de_AT)-1, "", my_locale_day_names_de_AT, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_de_AT =
+ { array_elements(my_locale_ab_day_names_de_AT)-1, "", my_locale_ab_day_names_de_AT, NULL };
+MY_LOCALE my_locale_de_AT=
+ { "de_AT", "German - Austria", FALSE, &my_locale_typelib_month_names_de_AT, &my_locale_typelib_ab_month_names_de_AT, &my_locale_typelib_day_names_de_AT, &my_locale_typelib_ab_day_names_de_AT };
+/***** LOCALE END de_AT *****/
+
+/***** LOCALE BEGIN de_DE: German - Germany *****/
+static const char *my_locale_month_names_de_DE[13] =
+ {"Januar","Februar","März","April","Mai","Juni","Juli","August","September","Oktober","November","Dezember", NullS };
+static const char *my_locale_ab_month_names_de_DE[13] =
+ {"Jan","Feb","Mär","Apr","Mai","Jun","Jul","Aug","Sep","Okt","Nov","Dez", NullS };
+static const char *my_locale_day_names_de_DE[8] =
+ {"Montag","Dienstag","Mittwoch","Donnerstag","Freitag","Samstag","Sonntag", NullS };
+static const char *my_locale_ab_day_names_de_DE[8] =
+ {"Mo","Di","Mi","Do","Fr","Sa","So", NullS };
+static TYPELIB my_locale_typelib_month_names_de_DE =
+ { array_elements(my_locale_month_names_de_DE)-1, "", my_locale_month_names_de_DE, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_de_DE =
+ { array_elements(my_locale_ab_month_names_de_DE)-1, "", my_locale_ab_month_names_de_DE, NULL };
+static TYPELIB my_locale_typelib_day_names_de_DE =
+ { array_elements(my_locale_day_names_de_DE)-1, "", my_locale_day_names_de_DE, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_de_DE =
+ { array_elements(my_locale_ab_day_names_de_DE)-1, "", my_locale_ab_day_names_de_DE, NULL };
+MY_LOCALE my_locale_de_DE=
+ { "de_DE", "German - Germany", FALSE, &my_locale_typelib_month_names_de_DE, &my_locale_typelib_ab_month_names_de_DE, &my_locale_typelib_day_names_de_DE, &my_locale_typelib_ab_day_names_de_DE };
+/***** LOCALE END de_DE *****/
+
+/***** LOCALE BEGIN en_US: English - United States *****/
+static const char *my_locale_month_names_en_US[13] =
+ {"January","February","March","April","May","June","July","August","September","October","November","December", NullS };
+static const char *my_locale_ab_month_names_en_US[13] =
+ {"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec", NullS };
+static const char *my_locale_day_names_en_US[8] =
+ {"Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday", NullS };
+static const char *my_locale_ab_day_names_en_US[8] =
+ {"Mon","Tue","Wed","Thu","Fri","Sat","Sun", NullS };
+static TYPELIB my_locale_typelib_month_names_en_US =
+ { array_elements(my_locale_month_names_en_US)-1, "", my_locale_month_names_en_US, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_en_US =
+ { array_elements(my_locale_ab_month_names_en_US)-1, "", my_locale_ab_month_names_en_US, NULL };
+static TYPELIB my_locale_typelib_day_names_en_US =
+ { array_elements(my_locale_day_names_en_US)-1, "", my_locale_day_names_en_US, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_en_US =
+ { array_elements(my_locale_ab_day_names_en_US)-1, "", my_locale_ab_day_names_en_US, NULL };
+MY_LOCALE my_locale_en_US=
+ { "en_US", "English - United States", TRUE, &my_locale_typelib_month_names_en_US, &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US };
+/***** LOCALE END en_US *****/
+
+/***** LOCALE BEGIN es_ES: Spanish - Spain *****/
+static const char *my_locale_month_names_es_ES[13] =
+ {"enero","febrero","marzo","abril","mayo","junio","julio","agosto","septiembre","octubre","noviembre","diciembre", NullS };
+static const char *my_locale_ab_month_names_es_ES[13] =
+ {"ene","feb","mar","abr","may","jun","jul","ago","sep","oct","nov","dic", NullS };
+static const char *my_locale_day_names_es_ES[8] =
+ {"lunes","martes","miércoles","jueves","viernes","sábado","domingo", NullS };
+static const char *my_locale_ab_day_names_es_ES[8] =
+ {"lun","mar","mié","jue","vie","sáb","dom", NullS };
+static TYPELIB my_locale_typelib_month_names_es_ES =
+ { array_elements(my_locale_month_names_es_ES)-1, "", my_locale_month_names_es_ES, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_es_ES =
+ { array_elements(my_locale_ab_month_names_es_ES)-1, "", my_locale_ab_month_names_es_ES, NULL };
+static TYPELIB my_locale_typelib_day_names_es_ES =
+ { array_elements(my_locale_day_names_es_ES)-1, "", my_locale_day_names_es_ES, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_es_ES =
+ { array_elements(my_locale_ab_day_names_es_ES)-1, "", my_locale_ab_day_names_es_ES, NULL };
+MY_LOCALE my_locale_es_ES=
+ { "es_ES", "Spanish - Spain", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_ES *****/
+
+/***** LOCALE BEGIN et_EE: Estonian - Estonia *****/
+static const char *my_locale_month_names_et_EE[13] =
+ {"jaanuar","veebruar","märts","aprill","mai","juuni","juuli","august","september","oktoober","november","detsember", NullS };
+static const char *my_locale_ab_month_names_et_EE[13] =
+ {"jaan ","veebr","märts","apr ","mai ","juuni","juuli","aug ","sept ","okt ","nov ","dets ", NullS };
+static const char *my_locale_day_names_et_EE[8] =
+ {"esmaspäev","teisipäev","kolmapäev","neljapäev","reede","laupäev","pühapäev", NullS };
+static const char *my_locale_ab_day_names_et_EE[8] =
+ {"E","T","K","N","R","L","P", NullS };
+static TYPELIB my_locale_typelib_month_names_et_EE =
+ { array_elements(my_locale_month_names_et_EE)-1, "", my_locale_month_names_et_EE, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_et_EE =
+ { array_elements(my_locale_ab_month_names_et_EE)-1, "", my_locale_ab_month_names_et_EE, NULL };
+static TYPELIB my_locale_typelib_day_names_et_EE =
+ { array_elements(my_locale_day_names_et_EE)-1, "", my_locale_day_names_et_EE, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_et_EE =
+ { array_elements(my_locale_ab_day_names_et_EE)-1, "", my_locale_ab_day_names_et_EE, NULL };
+MY_LOCALE my_locale_et_EE=
+ { "et_EE", "Estonian - Estonia", FALSE, &my_locale_typelib_month_names_et_EE, &my_locale_typelib_ab_month_names_et_EE, &my_locale_typelib_day_names_et_EE, &my_locale_typelib_ab_day_names_et_EE };
+/***** LOCALE END et_EE *****/
+
+/***** LOCALE BEGIN eu_ES: Basque - Basque *****/
+static const char *my_locale_month_names_eu_ES[13] =
+ {"urtarrila","otsaila","martxoa","apirila","maiatza","ekaina","uztaila","abuztua","iraila","urria","azaroa","abendua", NullS };
+static const char *my_locale_ab_month_names_eu_ES[13] =
+ {"urt","ots","mar","api","mai","eka","uzt","abu","ira","urr","aza","abe", NullS };
+static const char *my_locale_day_names_eu_ES[8] =
+ {"astelehena","asteartea","asteazkena","osteguna","ostirala","larunbata","igandea", NullS };
+static const char *my_locale_ab_day_names_eu_ES[8] =
+ {"al.","ar.","az.","og.","or.","lr.","ig.", NullS };
+static TYPELIB my_locale_typelib_month_names_eu_ES =
+ { array_elements(my_locale_month_names_eu_ES)-1, "", my_locale_month_names_eu_ES, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_eu_ES =
+ { array_elements(my_locale_ab_month_names_eu_ES)-1, "", my_locale_ab_month_names_eu_ES, NULL };
+static TYPELIB my_locale_typelib_day_names_eu_ES =
+ { array_elements(my_locale_day_names_eu_ES)-1, "", my_locale_day_names_eu_ES, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_eu_ES =
+ { array_elements(my_locale_ab_day_names_eu_ES)-1, "", my_locale_ab_day_names_eu_ES, NULL };
+MY_LOCALE my_locale_eu_ES=
+ { "eu_ES", "Basque - Basque", TRUE, &my_locale_typelib_month_names_eu_ES, &my_locale_typelib_ab_month_names_eu_ES, &my_locale_typelib_day_names_eu_ES, &my_locale_typelib_ab_day_names_eu_ES };
+/***** LOCALE END eu_ES *****/
+
+/***** LOCALE BEGIN fi_FI: Finnish - Finland *****/
+static const char *my_locale_month_names_fi_FI[13] =
+ {"tammikuu","helmikuu","maaliskuu","huhtikuu","toukokuu","kesäkuu","heinäkuu","elokuu","syyskuu","lokakuu","marraskuu","joulukuu", NullS };
+static const char *my_locale_ab_month_names_fi_FI[13] =
+ {"tammi ","helmi ","maalis","huhti ","touko ","kesä  ","heinä ","elo   ","syys  ","loka  ","marras","joulu ", NullS };
+static const char *my_locale_day_names_fi_FI[8] =
+ {"maanantai","tiistai","keskiviikko","torstai","perjantai","lauantai","sunnuntai", NullS };
+static const char *my_locale_ab_day_names_fi_FI[8] =
+ {"ma","ti","ke","to","pe","la","su", NullS };
+static TYPELIB my_locale_typelib_month_names_fi_FI =
+ { array_elements(my_locale_month_names_fi_FI)-1, "", my_locale_month_names_fi_FI, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_fi_FI =
+ { array_elements(my_locale_ab_month_names_fi_FI)-1, "", my_locale_ab_month_names_fi_FI, NULL };
+static TYPELIB my_locale_typelib_day_names_fi_FI =
+ { array_elements(my_locale_day_names_fi_FI)-1, "", my_locale_day_names_fi_FI, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_fi_FI =
+ { array_elements(my_locale_ab_day_names_fi_FI)-1, "", my_locale_ab_day_names_fi_FI, NULL };
+MY_LOCALE my_locale_fi_FI=
+ { "fi_FI", "Finnish - Finland", FALSE, &my_locale_typelib_month_names_fi_FI, &my_locale_typelib_ab_month_names_fi_FI, &my_locale_typelib_day_names_fi_FI, &my_locale_typelib_ab_day_names_fi_FI };
+/***** LOCALE END fi_FI *****/
+
+/***** LOCALE BEGIN fo_FO: Faroese - Faroe Islands *****/
+static const char *my_locale_month_names_fo_FO[13] =
+ {"januar","februar","mars","apríl","mai","juni","juli","august","september","oktober","november","desember", NullS };
+static const char *my_locale_ab_month_names_fo_FO[13] =
+ {"jan","feb","mar","apr","mai","jun","jul","aug","sep","okt","nov","des", NullS };
+static const char *my_locale_day_names_fo_FO[8] =
+ {"mánadagur","týsdagur","mikudagur","hósdagur","fríggjadagur","leygardagur","sunnudagur", NullS };
+static const char *my_locale_ab_day_names_fo_FO[8] =
+ {"mán","týs","mik","hós","frí","ley","sun", NullS };
+static TYPELIB my_locale_typelib_month_names_fo_FO =
+ { array_elements(my_locale_month_names_fo_FO)-1, "", my_locale_month_names_fo_FO, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_fo_FO =
+ { array_elements(my_locale_ab_month_names_fo_FO)-1, "", my_locale_ab_month_names_fo_FO, NULL };
+static TYPELIB my_locale_typelib_day_names_fo_FO =
+ { array_elements(my_locale_day_names_fo_FO)-1, "", my_locale_day_names_fo_FO, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_fo_FO =
+ { array_elements(my_locale_ab_day_names_fo_FO)-1, "", my_locale_ab_day_names_fo_FO, NULL };
+MY_LOCALE my_locale_fo_FO=
+ { "fo_FO", "Faroese - Faroe Islands", FALSE, &my_locale_typelib_month_names_fo_FO, &my_locale_typelib_ab_month_names_fo_FO, &my_locale_typelib_day_names_fo_FO, &my_locale_typelib_ab_day_names_fo_FO };
+/***** LOCALE END fo_FO *****/
+
+/***** LOCALE BEGIN fr_FR: French - France *****/
+static const char *my_locale_month_names_fr_FR[13] =
+ {"janvier","février","mars","avril","mai","juin","juillet","août","septembre","octobre","novembre","décembre", NullS };
+static const char *my_locale_ab_month_names_fr_FR[13] =
+ {"jan","fév","mar","avr","mai","jun","jui","aoû","sep","oct","nov","déc", NullS };
+static const char *my_locale_day_names_fr_FR[8] =
+ {"lundi","mardi","mercredi","jeudi","vendredi","samedi","dimanche", NullS };
+static const char *my_locale_ab_day_names_fr_FR[8] =
+ {"lun","mar","mer","jeu","ven","sam","dim", NullS };
+static TYPELIB my_locale_typelib_month_names_fr_FR =
+ { array_elements(my_locale_month_names_fr_FR)-1, "", my_locale_month_names_fr_FR, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_fr_FR =
+ { array_elements(my_locale_ab_month_names_fr_FR)-1, "", my_locale_ab_month_names_fr_FR, NULL };
+static TYPELIB my_locale_typelib_day_names_fr_FR =
+ { array_elements(my_locale_day_names_fr_FR)-1, "", my_locale_day_names_fr_FR, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_fr_FR =
+ { array_elements(my_locale_ab_day_names_fr_FR)-1, "", my_locale_ab_day_names_fr_FR, NULL };
+MY_LOCALE my_locale_fr_FR=
+ { "fr_FR", "French - France", FALSE, &my_locale_typelib_month_names_fr_FR, &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR };
+/***** LOCALE END fr_FR *****/
+
+/***** LOCALE BEGIN gl_ES: Galician - Galician *****/
+static const char *my_locale_month_names_gl_ES[13] =
+ {"Xaneiro","Febreiro","Marzo","Abril","Maio","Xuño","Xullo","Agosto","Setembro","Outubro","Novembro","Decembro", NullS };
+static const char *my_locale_ab_month_names_gl_ES[13] =
+ {"Xan","Feb","Mar","Abr","Mai","Xuñ","Xul","Ago","Set","Out","Nov","Dec", NullS };
+static const char *my_locale_day_names_gl_ES[8] =
+ {"Luns","Martes","Mércores","Xoves","Venres","Sábado","Domingo", NullS };
+static const char *my_locale_ab_day_names_gl_ES[8] =
+ {"Lun","Mar","Mér","Xov","Ven","Sáb","Dom", NullS };
+static TYPELIB my_locale_typelib_month_names_gl_ES =
+ { array_elements(my_locale_month_names_gl_ES)-1, "", my_locale_month_names_gl_ES, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_gl_ES =
+ { array_elements(my_locale_ab_month_names_gl_ES)-1, "", my_locale_ab_month_names_gl_ES, NULL };
+static TYPELIB my_locale_typelib_day_names_gl_ES =
+ { array_elements(my_locale_day_names_gl_ES)-1, "", my_locale_day_names_gl_ES, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_gl_ES =
+ { array_elements(my_locale_ab_day_names_gl_ES)-1, "", my_locale_ab_day_names_gl_ES, NULL };
+MY_LOCALE my_locale_gl_ES=
+ { "gl_ES", "Galician - Galician", FALSE, &my_locale_typelib_month_names_gl_ES, &my_locale_typelib_ab_month_names_gl_ES, &my_locale_typelib_day_names_gl_ES, &my_locale_typelib_ab_day_names_gl_ES };
+/***** LOCALE END gl_ES *****/
+
+/***** LOCALE BEGIN gu_IN: Gujarati - India *****/
+static const char *my_locale_month_names_gu_IN[13] =
+ {"જાનà«àª¯à«àª†àª°à«€","ફેબà«àª°à«àª†àª°à«€","મારà«àªš","àªàªªà«àª°àª¿àª²","મે","જà«àª¨","જà«àª²àª¾àª‡","ઓગસà«àªŸ","સેપà«àªŸà«‡àª®à«àª¬àª°","ઓકà«àªŸà«‹àª¬àª°","નવેમà«àª¬àª°","ડિસેમà«àª¬àª°", NullS };
+static const char *my_locale_ab_month_names_gu_IN[13] =
+ {"જાન","ફેબ","માર","àªàªªà«àª°","મે","જà«àª¨","જà«àª²","ઓગ","સેપà«àªŸ","ઓકà«àªŸ","નોવ","ડિસ", NullS };
+static const char *my_locale_day_names_gu_IN[8] =
+ {"સોમવાર","મનà«àª—ળવાર","બà«àª§àªµàª¾àª°","ગà«àª°à«àªµàª¾àª°","શà«àª•à«àª°àªµàª¾àª°","શનિવાર","રવિવાર", NullS };
+static const char *my_locale_ab_day_names_gu_IN[8] =
+ {"સોમ","મનà«àª—ળ","બà«àª§","ગà«àª°à«","શà«àª•à«àª°","શનિ","રવિ", NullS };
+static TYPELIB my_locale_typelib_month_names_gu_IN =
+ { array_elements(my_locale_month_names_gu_IN)-1, "", my_locale_month_names_gu_IN, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_gu_IN =
+ { array_elements(my_locale_ab_month_names_gu_IN)-1, "", my_locale_ab_month_names_gu_IN, NULL };
+static TYPELIB my_locale_typelib_day_names_gu_IN =
+ { array_elements(my_locale_day_names_gu_IN)-1, "", my_locale_day_names_gu_IN, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_gu_IN =
+ { array_elements(my_locale_ab_day_names_gu_IN)-1, "", my_locale_ab_day_names_gu_IN, NULL };
+MY_LOCALE my_locale_gu_IN=
+ { "gu_IN", "Gujarati - India", FALSE, &my_locale_typelib_month_names_gu_IN, &my_locale_typelib_ab_month_names_gu_IN, &my_locale_typelib_day_names_gu_IN, &my_locale_typelib_ab_day_names_gu_IN };
+/***** LOCALE END gu_IN *****/
+
+/***** LOCALE BEGIN he_IL: Hebrew - Israel *****/
+static const char *my_locale_month_names_he_IL[13] =
+ {"ינו×ר","פברו×ר","מרץ","×פריל","מ××™","יוני","יולי","×וגוסט","ספטמבר","×וקטובר","נובמבר","דצמבר", NullS };
+static const char *my_locale_ab_month_names_he_IL[13] =
+ {"ינו","פבר","מרץ","×פר","מ××™","יונ","יול","×וג","ספט","×וק","נוב","דצמ", NullS };
+static const char *my_locale_day_names_he_IL[8] =
+ {"שני","שלישי","רביעי","חמישי","שישי","שבת","ר×שון", NullS };
+static const char *my_locale_ab_day_names_he_IL[8] =
+ {"ב'","×’'","ד'","×”'","ו'","ש'","×'", NullS };
+static TYPELIB my_locale_typelib_month_names_he_IL =
+ { array_elements(my_locale_month_names_he_IL)-1, "", my_locale_month_names_he_IL, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_he_IL =
+ { array_elements(my_locale_ab_month_names_he_IL)-1, "", my_locale_ab_month_names_he_IL, NULL };
+static TYPELIB my_locale_typelib_day_names_he_IL =
+ { array_elements(my_locale_day_names_he_IL)-1, "", my_locale_day_names_he_IL, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_he_IL =
+ { array_elements(my_locale_ab_day_names_he_IL)-1, "", my_locale_ab_day_names_he_IL, NULL };
+MY_LOCALE my_locale_he_IL=
+ { "he_IL", "Hebrew - Israel", FALSE, &my_locale_typelib_month_names_he_IL, &my_locale_typelib_ab_month_names_he_IL, &my_locale_typelib_day_names_he_IL, &my_locale_typelib_ab_day_names_he_IL };
+/***** LOCALE END he_IL *****/
+
+/***** LOCALE BEGIN hi_IN: Hindi - India *****/
+static const char *my_locale_month_names_hi_IN[13] =
+ {"जनवरी","फ़रवरी","मारà¥à¤š","अपà¥à¤°à¥‡à¤²","मई","जून","जà¥à¤²à¤¾à¤ˆ","अगसà¥à¤¤","सितमà¥à¤¬à¤°","अकà¥à¤Ÿà¥‚बर","नवमà¥à¤¬à¤°","दिसमà¥à¤¬à¤°", NullS };
+static const char *my_locale_ab_month_names_hi_IN[13] =
+ {"जनवरी","फ़रवरी","मारà¥à¤š","अपà¥à¤°à¥‡à¤²","मई","जून","जà¥à¤²à¤¾à¤ˆ","अगसà¥à¤¤","सितमà¥à¤¬à¤°","अकà¥à¤Ÿà¥‚बर","नवमà¥à¤¬à¤°","दिसमà¥à¤¬à¤°", NullS };
+static const char *my_locale_day_names_hi_IN[8] =
+ {"सोमवार ","मंगलवार ","बà¥à¤§à¤µà¤¾à¤° ","गà¥à¤°à¥à¤µà¤¾à¤° ","शà¥à¤•à¥à¤°à¤µà¤¾à¤° ","शनिवार ","रविवार ", NullS };
+static const char *my_locale_ab_day_names_hi_IN[8] =
+ {"सोम ","मंगल ","बà¥à¤§ ","गà¥à¤°à¥ ","शà¥à¤•à¥à¤° ","शनि ","रवि ", NullS };
+static TYPELIB my_locale_typelib_month_names_hi_IN =
+ { array_elements(my_locale_month_names_hi_IN)-1, "", my_locale_month_names_hi_IN, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_hi_IN =
+ { array_elements(my_locale_ab_month_names_hi_IN)-1, "", my_locale_ab_month_names_hi_IN, NULL };
+static TYPELIB my_locale_typelib_day_names_hi_IN =
+ { array_elements(my_locale_day_names_hi_IN)-1, "", my_locale_day_names_hi_IN, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_hi_IN =
+ { array_elements(my_locale_ab_day_names_hi_IN)-1, "", my_locale_ab_day_names_hi_IN, NULL };
+MY_LOCALE my_locale_hi_IN=
+ { "hi_IN", "Hindi - India", FALSE, &my_locale_typelib_month_names_hi_IN, &my_locale_typelib_ab_month_names_hi_IN, &my_locale_typelib_day_names_hi_IN, &my_locale_typelib_ab_day_names_hi_IN };
+/***** LOCALE END hi_IN *****/
+
+/***** LOCALE BEGIN hr_HR: Croatian - Croatia *****/
+static const char *my_locale_month_names_hr_HR[13] =
+ {"SijeÄanj","VeljaÄa","Ožujak","Travanj","Svibanj","Lipanj","Srpanj","Kolovoz","Rujan","Listopad","Studeni","Prosinac", NullS };
+static const char *my_locale_ab_month_names_hr_HR[13] =
+ {"Sij","Vel","Ožu","Tra","Svi","Lip","Srp","Kol","Ruj","Lis","Stu","Pro", NullS };
+static const char *my_locale_day_names_hr_HR[8] =
+ {"Ponedjeljak","Utorak","Srijeda","ÄŒetvrtak","Petak","Subota","Nedjelja", NullS };
+static const char *my_locale_ab_day_names_hr_HR[8] =
+ {"Pon","Uto","Sri","ÄŒet","Pet","Sub","Ned", NullS };
+static TYPELIB my_locale_typelib_month_names_hr_HR =
+ { array_elements(my_locale_month_names_hr_HR)-1, "", my_locale_month_names_hr_HR, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_hr_HR =
+ { array_elements(my_locale_ab_month_names_hr_HR)-1, "", my_locale_ab_month_names_hr_HR, NULL };
+static TYPELIB my_locale_typelib_day_names_hr_HR =
+ { array_elements(my_locale_day_names_hr_HR)-1, "", my_locale_day_names_hr_HR, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_hr_HR =
+ { array_elements(my_locale_ab_day_names_hr_HR)-1, "", my_locale_ab_day_names_hr_HR, NULL };
+MY_LOCALE my_locale_hr_HR=
+ { "hr_HR", "Croatian - Croatia", FALSE, &my_locale_typelib_month_names_hr_HR, &my_locale_typelib_ab_month_names_hr_HR, &my_locale_typelib_day_names_hr_HR, &my_locale_typelib_ab_day_names_hr_HR };
+/***** LOCALE END hr_HR *****/
+
+/***** LOCALE BEGIN hu_HU: Hungarian - Hungary *****/
+static const char *my_locale_month_names_hu_HU[13] =
+ {"január","február","március","április","május","június","július","augusztus","szeptember","október","november","december", NullS };
+static const char *my_locale_ab_month_names_hu_HU[13] =
+ {"jan","feb","már","ápr","máj","jún","júl","aug","sze","okt","nov","dec", NullS };
+static const char *my_locale_day_names_hu_HU[8] =
+ {"hétfő","kedd","szerda","csütörtök","péntek","szombat","vasárnap", NullS };
+static const char *my_locale_ab_day_names_hu_HU[8] =
+ {"h","k","sze","cs","p","szo","v", NullS };
+static TYPELIB my_locale_typelib_month_names_hu_HU =
+ { array_elements(my_locale_month_names_hu_HU)-1, "", my_locale_month_names_hu_HU, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_hu_HU =
+ { array_elements(my_locale_ab_month_names_hu_HU)-1, "", my_locale_ab_month_names_hu_HU, NULL };
+static TYPELIB my_locale_typelib_day_names_hu_HU =
+ { array_elements(my_locale_day_names_hu_HU)-1, "", my_locale_day_names_hu_HU, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_hu_HU =
+ { array_elements(my_locale_ab_day_names_hu_HU)-1, "", my_locale_ab_day_names_hu_HU, NULL };
+MY_LOCALE my_locale_hu_HU=
+ { "hu_HU", "Hungarian - Hungary", FALSE, &my_locale_typelib_month_names_hu_HU, &my_locale_typelib_ab_month_names_hu_HU, &my_locale_typelib_day_names_hu_HU, &my_locale_typelib_ab_day_names_hu_HU };
+/***** LOCALE END hu_HU *****/
+
+/***** LOCALE BEGIN id_ID: Indonesian - Indonesia *****/
+static const char *my_locale_month_names_id_ID[13] =
+ {"Januari","Pebruari","Maret","April","Mei","Juni","Juli","Agustus","September","Oktober","November","Desember", NullS };
+static const char *my_locale_ab_month_names_id_ID[13] =
+ {"Jan","Peb","Mar","Apr","Mei","Jun","Jul","Agu","Sep","Okt","Nov","Des", NullS };
+static const char *my_locale_day_names_id_ID[8] =
+ {"Senin","Selasa","Rabu","Kamis","Jumat","Sabtu","Minggu", NullS };
+static const char *my_locale_ab_day_names_id_ID[8] =
+ {"Sen","Sel","Rab","Kam","Jum","Sab","Min", NullS };
+static TYPELIB my_locale_typelib_month_names_id_ID =
+ { array_elements(my_locale_month_names_id_ID)-1, "", my_locale_month_names_id_ID, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_id_ID =
+ { array_elements(my_locale_ab_month_names_id_ID)-1, "", my_locale_ab_month_names_id_ID, NULL };
+static TYPELIB my_locale_typelib_day_names_id_ID =
+ { array_elements(my_locale_day_names_id_ID)-1, "", my_locale_day_names_id_ID, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_id_ID =
+ { array_elements(my_locale_ab_day_names_id_ID)-1, "", my_locale_ab_day_names_id_ID, NULL };
+MY_LOCALE my_locale_id_ID=
+ { "id_ID", "Indonesian - Indonesia", TRUE, &my_locale_typelib_month_names_id_ID, &my_locale_typelib_ab_month_names_id_ID, &my_locale_typelib_day_names_id_ID, &my_locale_typelib_ab_day_names_id_ID };
+/***** LOCALE END id_ID *****/
+
+/***** LOCALE BEGIN is_IS: Icelandic - Iceland *****/
+static const char *my_locale_month_names_is_IS[13] =
+ {"janúar","febrúar","mars","apríl","maí","júní","júlí","ágúst","september","október","nóvember","desember", NullS };
+static const char *my_locale_ab_month_names_is_IS[13] =
+ {"jan","feb","mar","apr","maí","jún","júl","ágú","sep","okt","nóv","des", NullS };
+static const char *my_locale_day_names_is_IS[8] =
+ {"mánudagur","þriðjudagur","miðvikudagur","fimmtudagur","föstudagur","laugardagur","sunnudagur", NullS };
+static const char *my_locale_ab_day_names_is_IS[8] =
+ {"mán","þri","mið","fim","fös","lau","sun", NullS };
+static TYPELIB my_locale_typelib_month_names_is_IS =
+ { array_elements(my_locale_month_names_is_IS)-1, "", my_locale_month_names_is_IS, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_is_IS =
+ { array_elements(my_locale_ab_month_names_is_IS)-1, "", my_locale_ab_month_names_is_IS, NULL };
+static TYPELIB my_locale_typelib_day_names_is_IS =
+ { array_elements(my_locale_day_names_is_IS)-1, "", my_locale_day_names_is_IS, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_is_IS =
+ { array_elements(my_locale_ab_day_names_is_IS)-1, "", my_locale_ab_day_names_is_IS, NULL };
+MY_LOCALE my_locale_is_IS=
+ { "is_IS", "Icelandic - Iceland", FALSE, &my_locale_typelib_month_names_is_IS, &my_locale_typelib_ab_month_names_is_IS, &my_locale_typelib_day_names_is_IS, &my_locale_typelib_ab_day_names_is_IS };
+/***** LOCALE END is_IS *****/
+
+/***** LOCALE BEGIN it_CH: Italian - Switzerland *****/
+static const char *my_locale_month_names_it_CH[13] =
+ {"gennaio","febbraio","marzo","aprile","maggio","giugno","luglio","agosto","settembre","ottobre","novembre","dicembre", NullS };
+static const char *my_locale_ab_month_names_it_CH[13] =
+ {"gen","feb","mar","apr","mag","giu","lug","ago","set","ott","nov","dic", NullS };
+static const char *my_locale_day_names_it_CH[8] =
+ {"lunedì","martedì","mercoledì","giovedì","venerdì","sabato","domenica", NullS };
+static const char *my_locale_ab_day_names_it_CH[8] =
+ {"lun","mar","mer","gio","ven","sab","dom", NullS };
+static TYPELIB my_locale_typelib_month_names_it_CH =
+ { array_elements(my_locale_month_names_it_CH)-1, "", my_locale_month_names_it_CH, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_it_CH =
+ { array_elements(my_locale_ab_month_names_it_CH)-1, "", my_locale_ab_month_names_it_CH, NULL };
+static TYPELIB my_locale_typelib_day_names_it_CH =
+ { array_elements(my_locale_day_names_it_CH)-1, "", my_locale_day_names_it_CH, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_it_CH =
+ { array_elements(my_locale_ab_day_names_it_CH)-1, "", my_locale_ab_day_names_it_CH, NULL };
+MY_LOCALE my_locale_it_CH=
+ { "it_CH", "Italian - Switzerland", FALSE, &my_locale_typelib_month_names_it_CH, &my_locale_typelib_ab_month_names_it_CH, &my_locale_typelib_day_names_it_CH, &my_locale_typelib_ab_day_names_it_CH };
+/***** LOCALE END it_CH *****/
+
+/***** LOCALE BEGIN ja_JP: Japanese - Japan *****/
+static const char *my_locale_month_names_ja_JP[13] =
+ {"1月","2月","3月","4月","5月","6月","7月","8月","9月","10月","11月","12月", NullS };
+static const char *my_locale_ab_month_names_ja_JP[13] =
+ {" 1月"," 2月"," 3月"," 4月"," 5月"," 6月"," 7月"," 8月"," 9月","10月","11月","12月", NullS };
+static const char *my_locale_day_names_ja_JP[8] =
+ {"月曜日","ç«æ›œæ—¥","水曜日","木曜日","金曜日","土曜日","日曜日", NullS };
+static const char *my_locale_ab_day_names_ja_JP[8] =
+ {"月","ç«","æ°´","木","金","土","æ—¥", NullS };
+static TYPELIB my_locale_typelib_month_names_ja_JP =
+ { array_elements(my_locale_month_names_ja_JP)-1, "", my_locale_month_names_ja_JP, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ja_JP =
+ { array_elements(my_locale_ab_month_names_ja_JP)-1, "", my_locale_ab_month_names_ja_JP, NULL };
+static TYPELIB my_locale_typelib_day_names_ja_JP =
+ { array_elements(my_locale_day_names_ja_JP)-1, "", my_locale_day_names_ja_JP, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ja_JP =
+ { array_elements(my_locale_ab_day_names_ja_JP)-1, "", my_locale_ab_day_names_ja_JP, NULL };
+MY_LOCALE my_locale_ja_JP=
+ { "ja_JP", "Japanese - Japan", FALSE, &my_locale_typelib_month_names_ja_JP, &my_locale_typelib_ab_month_names_ja_JP, &my_locale_typelib_day_names_ja_JP, &my_locale_typelib_ab_day_names_ja_JP };
+/***** LOCALE END ja_JP *****/
+
+/***** LOCALE BEGIN ko_KR: Korean - Korea *****/
+static const char *my_locale_month_names_ko_KR[13] =
+ {"ì¼ì›”","ì´ì›”","삼월","사월","오월","유월","ì¹ ì›”","팔월","구월","시월","ì‹­ì¼ì›”","ì‹­ì´ì›”", NullS };
+static const char *my_locale_ab_month_names_ko_KR[13] =
+ {" 1ì›”"," 2ì›”"," 3ì›”"," 4ì›”"," 5ì›”"," 6ì›”"," 7ì›”"," 8ì›”"," 9ì›”","10ì›”","11ì›”","12ì›”", NullS };
+static const char *my_locale_day_names_ko_KR[8] =
+ {"월요ì¼","화요ì¼","수요ì¼","목요ì¼","금요ì¼","토요ì¼","ì¼ìš”ì¼", NullS };
+static const char *my_locale_ab_day_names_ko_KR[8] =
+ {"ì›”","í™”","수","목","금","토","ì¼", NullS };
+static TYPELIB my_locale_typelib_month_names_ko_KR =
+ { array_elements(my_locale_month_names_ko_KR)-1, "", my_locale_month_names_ko_KR, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ko_KR =
+ { array_elements(my_locale_ab_month_names_ko_KR)-1, "", my_locale_ab_month_names_ko_KR, NULL };
+static TYPELIB my_locale_typelib_day_names_ko_KR =
+ { array_elements(my_locale_day_names_ko_KR)-1, "", my_locale_day_names_ko_KR, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ko_KR =
+ { array_elements(my_locale_ab_day_names_ko_KR)-1, "", my_locale_ab_day_names_ko_KR, NULL };
+MY_LOCALE my_locale_ko_KR=
+ { "ko_KR", "Korean - Korea", FALSE, &my_locale_typelib_month_names_ko_KR, &my_locale_typelib_ab_month_names_ko_KR, &my_locale_typelib_day_names_ko_KR, &my_locale_typelib_ab_day_names_ko_KR };
+/***** LOCALE END ko_KR *****/
+
+/***** LOCALE BEGIN lt_LT: Lithuanian - Lithuania *****/
+static const char *my_locale_month_names_lt_LT[13] =
+ {"sausio","vasario","kovo","balandžio","gegužės","birželio","liepos","rugpjÅ«Äio","rugsÄ—jo","spalio","lapkriÄio","gruodžio", NullS };
+static const char *my_locale_ab_month_names_lt_LT[13] =
+ {"Sau","Vas","Kov","Bal","Geg","Bir","Lie","Rgp","Rgs","Spa","Lap","Grd", NullS };
+static const char *my_locale_day_names_lt_LT[8] =
+ {"Pirmadienis","Antradienis","TreÄiadienis","Ketvirtadienis","Penktadienis","Å eÅ¡tadienis","Sekmadienis", NullS };
+static const char *my_locale_ab_day_names_lt_LT[8] =
+ {"Pr","An","Tr","Kt","Pn","Å t","Sk", NullS };
+static TYPELIB my_locale_typelib_month_names_lt_LT =
+ { array_elements(my_locale_month_names_lt_LT)-1, "", my_locale_month_names_lt_LT, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_lt_LT =
+ { array_elements(my_locale_ab_month_names_lt_LT)-1, "", my_locale_ab_month_names_lt_LT, NULL };
+static TYPELIB my_locale_typelib_day_names_lt_LT =
+ { array_elements(my_locale_day_names_lt_LT)-1, "", my_locale_day_names_lt_LT, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_lt_LT =
+ { array_elements(my_locale_ab_day_names_lt_LT)-1, "", my_locale_ab_day_names_lt_LT, NULL };
+MY_LOCALE my_locale_lt_LT=
+ { "lt_LT", "Lithuanian - Lithuania", FALSE, &my_locale_typelib_month_names_lt_LT, &my_locale_typelib_ab_month_names_lt_LT, &my_locale_typelib_day_names_lt_LT, &my_locale_typelib_ab_day_names_lt_LT };
+/***** LOCALE END lt_LT *****/
+
+/***** LOCALE BEGIN lv_LV: Latvian - Latvia *****/
+static const char *my_locale_month_names_lv_LV[13] =
+ {"janvÄris","februÄris","marts","aprÄ«lis","maijs","jÅ«nijs","jÅ«lijs","augusts","septembris","oktobris","novembris","decembris", NullS };
+static const char *my_locale_ab_month_names_lv_LV[13] =
+ {"jan","feb","mar","apr","mai","jūn","jūl","aug","sep","okt","nov","dec", NullS };
+static const char *my_locale_day_names_lv_LV[8] =
+ {"pirmdiena","otrdiena","trešdiena","ceturtdiena","piektdiena","sestdiena","svētdiena", NullS };
+static const char *my_locale_ab_day_names_lv_LV[8] =
+ {"P ","O ","T ","C ","Pk","S ","Sv", NullS };
+static TYPELIB my_locale_typelib_month_names_lv_LV =
+ { array_elements(my_locale_month_names_lv_LV)-1, "", my_locale_month_names_lv_LV, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_lv_LV =
+ { array_elements(my_locale_ab_month_names_lv_LV)-1, "", my_locale_ab_month_names_lv_LV, NULL };
+static TYPELIB my_locale_typelib_day_names_lv_LV =
+ { array_elements(my_locale_day_names_lv_LV)-1, "", my_locale_day_names_lv_LV, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_lv_LV =
+ { array_elements(my_locale_ab_day_names_lv_LV)-1, "", my_locale_ab_day_names_lv_LV, NULL };
+MY_LOCALE my_locale_lv_LV=
+ { "lv_LV", "Latvian - Latvia", FALSE, &my_locale_typelib_month_names_lv_LV, &my_locale_typelib_ab_month_names_lv_LV, &my_locale_typelib_day_names_lv_LV, &my_locale_typelib_ab_day_names_lv_LV };
+/***** LOCALE END lv_LV *****/
+
+/***** LOCALE BEGIN mk_MK: Macedonian - FYROM *****/
+static const char *my_locale_month_names_mk_MK[13] =
+ {"јануари","февруари","март","април","мај","јуни","јули","авгуÑÑ‚","Ñептември","октомври","ноември","декември", NullS };
+static const char *my_locale_ab_month_names_mk_MK[13] =
+ {"јан","фев","мар","апр","мај","јун","јул","авг","Ñеп","окт","ное","дек", NullS };
+static const char *my_locale_day_names_mk_MK[8] =
+ {"понеделник","вторник","Ñреда","четврток","петок","Ñабота","недела", NullS };
+static const char *my_locale_ab_day_names_mk_MK[8] =
+ {"пон","вто","Ñре","чет","пет","Ñаб","нед", NullS };
+static TYPELIB my_locale_typelib_month_names_mk_MK =
+ { array_elements(my_locale_month_names_mk_MK)-1, "", my_locale_month_names_mk_MK, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_mk_MK =
+ { array_elements(my_locale_ab_month_names_mk_MK)-1, "", my_locale_ab_month_names_mk_MK, NULL };
+static TYPELIB my_locale_typelib_day_names_mk_MK =
+ { array_elements(my_locale_day_names_mk_MK)-1, "", my_locale_day_names_mk_MK, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_mk_MK =
+ { array_elements(my_locale_ab_day_names_mk_MK)-1, "", my_locale_ab_day_names_mk_MK, NULL };
+MY_LOCALE my_locale_mk_MK=
+ { "mk_MK", "Macedonian - FYROM", FALSE, &my_locale_typelib_month_names_mk_MK, &my_locale_typelib_ab_month_names_mk_MK, &my_locale_typelib_day_names_mk_MK, &my_locale_typelib_ab_day_names_mk_MK };
+/***** LOCALE END mk_MK *****/
+
+/***** LOCALE BEGIN mn_MN: Mongolia - Mongolian *****/
+static const char *my_locale_month_names_mn_MN[13] =
+ {"ÐÑгдүгÑÑÑ€ Ñар","Хоёрдугаар Ñар","Гуравдугаар Ñар","ДөрөвдүгÑÑÑ€ Ñар","Тавдугаар Ñар","Зургаадугар Ñар","Долоодугаар Ñар","Ðаймдугаар Ñар","ЕÑдүгÑÑÑ€ Ñар","Ðравдугаар Ñар","ÐрваннÑгдүгÑÑÑ€ Ñар","Ðрванхоёрдгаар Ñар", NullS };
+static const char *my_locale_ab_month_names_mn_MN[13] =
+ {"1-Ñ€","2-Ñ€","3-Ñ€","4-Ñ€","5-Ñ€","6-Ñ€","7-Ñ€","8-Ñ€","9-Ñ€","10-Ñ€","11-Ñ€","12-Ñ€", NullS };
+static const char *my_locale_day_names_mn_MN[8] =
+ {"Даваа","ÐœÑгмар","Лхагва","ПүрÑв","БааÑан","БÑмба","ÐÑм", NullS };
+static const char *my_locale_ab_day_names_mn_MN[8] =
+ {"Да","ÐœÑ","Лх","Пү","Ба","БÑ","ÐÑ", NullS };
+static TYPELIB my_locale_typelib_month_names_mn_MN =
+ { array_elements(my_locale_month_names_mn_MN)-1, "", my_locale_month_names_mn_MN, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_mn_MN =
+ { array_elements(my_locale_ab_month_names_mn_MN)-1, "", my_locale_ab_month_names_mn_MN, NULL };
+static TYPELIB my_locale_typelib_day_names_mn_MN =
+ { array_elements(my_locale_day_names_mn_MN)-1, "", my_locale_day_names_mn_MN, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_mn_MN =
+ { array_elements(my_locale_ab_day_names_mn_MN)-1, "", my_locale_ab_day_names_mn_MN, NULL };
+MY_LOCALE my_locale_mn_MN=
+ { "mn_MN", "Mongolia - Mongolian", FALSE, &my_locale_typelib_month_names_mn_MN, &my_locale_typelib_ab_month_names_mn_MN, &my_locale_typelib_day_names_mn_MN, &my_locale_typelib_ab_day_names_mn_MN };
+/***** LOCALE END mn_MN *****/
+
+/***** LOCALE BEGIN ms_MY: Malay - Malaysia *****/
+static const char *my_locale_month_names_ms_MY[13] =
+ {"Januari","Februari","Mac","April","Mei","Jun","Julai","Ogos","September","Oktober","November","Disember", NullS };
+static const char *my_locale_ab_month_names_ms_MY[13] =
+ {"Jan","Feb","Mac","Apr","Mei","Jun","Jul","Ogos","Sep","Okt","Nov","Dis", NullS };
+static const char *my_locale_day_names_ms_MY[8] =
+ {"Isnin","Selasa","Rabu","Khamis","Jumaat","Sabtu","Ahad", NullS };
+static const char *my_locale_ab_day_names_ms_MY[8] =
+ {"Isn","Sel","Rab","Kha","Jum","Sab","Ahd", NullS };
+static TYPELIB my_locale_typelib_month_names_ms_MY =
+ { array_elements(my_locale_month_names_ms_MY)-1, "", my_locale_month_names_ms_MY, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ms_MY =
+ { array_elements(my_locale_ab_month_names_ms_MY)-1, "", my_locale_ab_month_names_ms_MY, NULL };
+static TYPELIB my_locale_typelib_day_names_ms_MY =
+ { array_elements(my_locale_day_names_ms_MY)-1, "", my_locale_day_names_ms_MY, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ms_MY =
+ { array_elements(my_locale_ab_day_names_ms_MY)-1, "", my_locale_ab_day_names_ms_MY, NULL };
+MY_LOCALE my_locale_ms_MY=
+ { "ms_MY", "Malay - Malaysia", TRUE, &my_locale_typelib_month_names_ms_MY, &my_locale_typelib_ab_month_names_ms_MY, &my_locale_typelib_day_names_ms_MY, &my_locale_typelib_ab_day_names_ms_MY };
+/***** LOCALE END ms_MY *****/
+
+/***** LOCALE BEGIN nb_NO: Norwegian(Bokml) - Norway *****/
+static const char *my_locale_month_names_nb_NO[13] =
+ {"januar","februar","mars","april","mai","juni","juli","august","september","oktober","november","desember", NullS };
+static const char *my_locale_ab_month_names_nb_NO[13] =
+ {"jan","feb","mar","apr","mai","jun","jul","aug","sep","okt","nov","des", NullS };
+static const char *my_locale_day_names_nb_NO[8] =
+ {"mandag","tirsdag","onsdag","torsdag","fredag","lørdag","søndag", NullS };
+static const char *my_locale_ab_day_names_nb_NO[8] =
+ {"man","tir","ons","tor","fre","lør","søn", NullS };
+static TYPELIB my_locale_typelib_month_names_nb_NO =
+ { array_elements(my_locale_month_names_nb_NO)-1, "", my_locale_month_names_nb_NO, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_nb_NO =
+ { array_elements(my_locale_ab_month_names_nb_NO)-1, "", my_locale_ab_month_names_nb_NO, NULL };
+static TYPELIB my_locale_typelib_day_names_nb_NO =
+ { array_elements(my_locale_day_names_nb_NO)-1, "", my_locale_day_names_nb_NO, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_nb_NO =
+ { array_elements(my_locale_ab_day_names_nb_NO)-1, "", my_locale_ab_day_names_nb_NO, NULL };
+MY_LOCALE my_locale_nb_NO=
+ { "nb_NO", "Norwegian(Bokml) - Norway", FALSE, &my_locale_typelib_month_names_nb_NO, &my_locale_typelib_ab_month_names_nb_NO, &my_locale_typelib_day_names_nb_NO, &my_locale_typelib_ab_day_names_nb_NO };
+/***** LOCALE END nb_NO *****/
+
+/***** LOCALE BEGIN nl_NL: Dutch - The Netherlands *****/
+static const char *my_locale_month_names_nl_NL[13] =
+ {"januari","februari","maart","april","mei","juni","juli","augustus","september","oktober","november","december", NullS };
+static const char *my_locale_ab_month_names_nl_NL[13] =
+ {"jan","feb","mrt","apr","mei","jun","jul","aug","sep","okt","nov","dec", NullS };
+static const char *my_locale_day_names_nl_NL[8] =
+ {"maandag","dinsdag","woensdag","donderdag","vrijdag","zaterdag","zondag", NullS };
+static const char *my_locale_ab_day_names_nl_NL[8] =
+ {"ma","di","wo","do","vr","za","zo", NullS };
+static TYPELIB my_locale_typelib_month_names_nl_NL =
+ { array_elements(my_locale_month_names_nl_NL)-1, "", my_locale_month_names_nl_NL, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_nl_NL =
+ { array_elements(my_locale_ab_month_names_nl_NL)-1, "", my_locale_ab_month_names_nl_NL, NULL };
+static TYPELIB my_locale_typelib_day_names_nl_NL =
+ { array_elements(my_locale_day_names_nl_NL)-1, "", my_locale_day_names_nl_NL, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_nl_NL =
+ { array_elements(my_locale_ab_day_names_nl_NL)-1, "", my_locale_ab_day_names_nl_NL, NULL };
+MY_LOCALE my_locale_nl_NL=
+ { "nl_NL", "Dutch - The Netherlands", TRUE, &my_locale_typelib_month_names_nl_NL, &my_locale_typelib_ab_month_names_nl_NL, &my_locale_typelib_day_names_nl_NL, &my_locale_typelib_ab_day_names_nl_NL };
+/***** LOCALE END nl_NL *****/
+
+/***** LOCALE BEGIN pl_PL: Polish - Poland *****/
+static const char *my_locale_month_names_pl_PL[13] =
+ {"styczeń","luty","marzec","kwiecień","maj","czerwiec","lipiec","sierpień","wrzesień","październik","listopad","grudzień", NullS };
+static const char *my_locale_ab_month_names_pl_PL[13] =
+ {"sty","lut","mar","kwi","maj","cze","lip","sie","wrz","paź","lis","gru", NullS };
+static const char *my_locale_day_names_pl_PL[8] =
+ {"poniedziałek","wtorek","środa","czwartek","piątek","sobota","niedziela", NullS };
+static const char *my_locale_ab_day_names_pl_PL[8] =
+ {"pon","wto","śro","czw","pią","sob","nie", NullS };
+static TYPELIB my_locale_typelib_month_names_pl_PL =
+ { array_elements(my_locale_month_names_pl_PL)-1, "", my_locale_month_names_pl_PL, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_pl_PL =
+ { array_elements(my_locale_ab_month_names_pl_PL)-1, "", my_locale_ab_month_names_pl_PL, NULL };
+static TYPELIB my_locale_typelib_day_names_pl_PL =
+ { array_elements(my_locale_day_names_pl_PL)-1, "", my_locale_day_names_pl_PL, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_pl_PL =
+ { array_elements(my_locale_ab_day_names_pl_PL)-1, "", my_locale_ab_day_names_pl_PL, NULL };
+MY_LOCALE my_locale_pl_PL=
+ { "pl_PL", "Polish - Poland", FALSE, &my_locale_typelib_month_names_pl_PL, &my_locale_typelib_ab_month_names_pl_PL, &my_locale_typelib_day_names_pl_PL, &my_locale_typelib_ab_day_names_pl_PL };
+/***** LOCALE END pl_PL *****/
+
+/***** LOCALE BEGIN pt_BR: Portugese - Brazil *****/
+static const char *my_locale_month_names_pt_BR[13] =
+ {"janeiro","fevereiro","março","abril","maio","junho","julho","agosto","setembro","outubro","novembro","dezembro", NullS };
+static const char *my_locale_ab_month_names_pt_BR[13] =
+ {"Jan","Fev","Mar","Abr","Mai","Jun","Jul","Ago","Set","Out","Nov","Dez", NullS };
+static const char *my_locale_day_names_pt_BR[8] =
+ {"segunda","terça","quarta","quinta","sexta","sábado","domingo", NullS };
+static const char *my_locale_ab_day_names_pt_BR[8] =
+ {"Seg","Ter","Qua","Qui","Sex","Sáb","Dom", NullS };
+static TYPELIB my_locale_typelib_month_names_pt_BR =
+ { array_elements(my_locale_month_names_pt_BR)-1, "", my_locale_month_names_pt_BR, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_pt_BR =
+ { array_elements(my_locale_ab_month_names_pt_BR)-1, "", my_locale_ab_month_names_pt_BR, NULL };
+static TYPELIB my_locale_typelib_day_names_pt_BR =
+ { array_elements(my_locale_day_names_pt_BR)-1, "", my_locale_day_names_pt_BR, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_pt_BR =
+ { array_elements(my_locale_ab_day_names_pt_BR)-1, "", my_locale_ab_day_names_pt_BR, NULL };
+MY_LOCALE my_locale_pt_BR=
+ { "pt_BR", "Portugese - Brazil", FALSE, &my_locale_typelib_month_names_pt_BR, &my_locale_typelib_ab_month_names_pt_BR, &my_locale_typelib_day_names_pt_BR, &my_locale_typelib_ab_day_names_pt_BR };
+/***** LOCALE END pt_BR *****/
+
+/***** LOCALE BEGIN pt_PT: Portugese - Portugal *****/
+static const char *my_locale_month_names_pt_PT[13] =
+ {"Janeiro","Fevereiro","Março","Abril","Maio","Junho","Julho","Agosto","Setembro","Outubro","Novembro","Dezembro", NullS };
+static const char *my_locale_ab_month_names_pt_PT[13] =
+ {"Jan","Fev","Mar","Abr","Mai","Jun","Jul","Ago","Set","Out","Nov","Dez", NullS };
+static const char *my_locale_day_names_pt_PT[8] =
+ {"Segunda","Terça","Quarta","Quinta","Sexta","Sábado","Domingo", NullS };
+static const char *my_locale_ab_day_names_pt_PT[8] =
+ {"Seg","Ter","Qua","Qui","Sex","Sáb","Dom", NullS };
+static TYPELIB my_locale_typelib_month_names_pt_PT =
+ { array_elements(my_locale_month_names_pt_PT)-1, "", my_locale_month_names_pt_PT, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_pt_PT =
+ { array_elements(my_locale_ab_month_names_pt_PT)-1, "", my_locale_ab_month_names_pt_PT, NULL };
+static TYPELIB my_locale_typelib_day_names_pt_PT =
+ { array_elements(my_locale_day_names_pt_PT)-1, "", my_locale_day_names_pt_PT, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_pt_PT =
+ { array_elements(my_locale_ab_day_names_pt_PT)-1, "", my_locale_ab_day_names_pt_PT, NULL };
+MY_LOCALE my_locale_pt_PT=
+ { "pt_PT", "Portugese - Portugal", FALSE, &my_locale_typelib_month_names_pt_PT, &my_locale_typelib_ab_month_names_pt_PT, &my_locale_typelib_day_names_pt_PT, &my_locale_typelib_ab_day_names_pt_PT };
+/***** LOCALE END pt_PT *****/
+
+/***** LOCALE BEGIN ro_RO: Romanian - Romania *****/
+static const char *my_locale_month_names_ro_RO[13] =
+ {"Ianuarie","Februarie","Martie","Aprilie","Mai","Iunie","Iulie","August","Septembrie","Octombrie","Noiembrie","Decembrie", NullS };
+static const char *my_locale_ab_month_names_ro_RO[13] =
+ {"ian","feb","mar","apr","mai","iun","iul","aug","sep","oct","nov","dec", NullS };
+static const char *my_locale_day_names_ro_RO[8] =
+ {"Luni","Marţi","Miercuri","Joi","Vineri","SîmbĂtĂ","DuminicĂ", NullS };
+static const char *my_locale_ab_day_names_ro_RO[8] =
+ {"Lu","Ma","Mi","Jo","Vi","Sî","Du", NullS };
+static TYPELIB my_locale_typelib_month_names_ro_RO =
+ { array_elements(my_locale_month_names_ro_RO)-1, "", my_locale_month_names_ro_RO, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ro_RO =
+ { array_elements(my_locale_ab_month_names_ro_RO)-1, "", my_locale_ab_month_names_ro_RO, NULL };
+static TYPELIB my_locale_typelib_day_names_ro_RO =
+ { array_elements(my_locale_day_names_ro_RO)-1, "", my_locale_day_names_ro_RO, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ro_RO =
+ { array_elements(my_locale_ab_day_names_ro_RO)-1, "", my_locale_ab_day_names_ro_RO, NULL };
+MY_LOCALE my_locale_ro_RO=
+ { "ro_RO", "Romanian - Romania", FALSE, &my_locale_typelib_month_names_ro_RO, &my_locale_typelib_ab_month_names_ro_RO, &my_locale_typelib_day_names_ro_RO, &my_locale_typelib_ab_day_names_ro_RO };
+/***** LOCALE END ro_RO *****/
+
+/***** LOCALE BEGIN ru_RU: Russian - Russia *****/
+static const char *my_locale_month_names_ru_RU[13] =
+ {"ЯнварÑ","ФевралÑ","Марта","ÐпрелÑ","МаÑ","ИюнÑ","ИюлÑ","ÐвгуÑта","СентÑбрÑ","ОктÑбрÑ","ÐоÑбрÑ","ДекабрÑ", NullS };
+static const char *my_locale_ab_month_names_ru_RU[13] =
+ {"Янв","Фев","Мар","Ðпр","Май","Июн","Июл","Ðвг","Сен","Окт","ÐоÑ","Дек", NullS };
+static const char *my_locale_day_names_ru_RU[8] =
+ {"Понедельник","Вторник","Среда","Четверг","ПÑтница","Суббота","ВоÑкреÑенье", NullS };
+static const char *my_locale_ab_day_names_ru_RU[8] =
+ {"Пнд","Ð’Ñ‚Ñ€","Срд","Чтв","Птн","Сбт","Ð’Ñк", NullS };
+static TYPELIB my_locale_typelib_month_names_ru_RU =
+ { array_elements(my_locale_month_names_ru_RU)-1, "", my_locale_month_names_ru_RU, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ru_RU =
+ { array_elements(my_locale_ab_month_names_ru_RU)-1, "", my_locale_ab_month_names_ru_RU, NULL };
+static TYPELIB my_locale_typelib_day_names_ru_RU =
+ { array_elements(my_locale_day_names_ru_RU)-1, "", my_locale_day_names_ru_RU, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ru_RU =
+ { array_elements(my_locale_ab_day_names_ru_RU)-1, "", my_locale_ab_day_names_ru_RU, NULL };
+MY_LOCALE my_locale_ru_RU=
+ { "ru_RU", "Russian - Russia", FALSE, &my_locale_typelib_month_names_ru_RU, &my_locale_typelib_ab_month_names_ru_RU, &my_locale_typelib_day_names_ru_RU, &my_locale_typelib_ab_day_names_ru_RU };
+/***** LOCALE END ru_RU *****/
+
+/***** LOCALE BEGIN ru_UA: Russian - Ukraine *****/
+static const char *my_locale_month_names_ru_UA[13] =
+ {"Январь","Февраль","Март","Ðпрель","Май","Июнь","Июль","ÐвгуÑÑ‚","СентÑбрь","ОктÑбрь","ÐоÑбрь","Декабрь", NullS };
+static const char *my_locale_ab_month_names_ru_UA[13] =
+ {"Янв","Фев","Мар","Ðпр","Май","Июн","Июл","Ðвг","Сен","Окт","ÐоÑ","Дек", NullS };
+static const char *my_locale_day_names_ru_UA[8] =
+ {"Понедельник","Вторник","Среда","Четверг","ПÑтница","Суббота","ВоÑкреÑенье", NullS };
+static const char *my_locale_ab_day_names_ru_UA[8] =
+ {"Пнд","Вто","Срд","Чтв","Птн","Суб","Ð’Ñк", NullS };
+static TYPELIB my_locale_typelib_month_names_ru_UA =
+ { array_elements(my_locale_month_names_ru_UA)-1, "", my_locale_month_names_ru_UA, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ru_UA =
+ { array_elements(my_locale_ab_month_names_ru_UA)-1, "", my_locale_ab_month_names_ru_UA, NULL };
+static TYPELIB my_locale_typelib_day_names_ru_UA =
+ { array_elements(my_locale_day_names_ru_UA)-1, "", my_locale_day_names_ru_UA, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ru_UA =
+ { array_elements(my_locale_ab_day_names_ru_UA)-1, "", my_locale_ab_day_names_ru_UA, NULL };
+MY_LOCALE my_locale_ru_UA=
+ { "ru_UA", "Russian - Ukraine", FALSE, &my_locale_typelib_month_names_ru_UA, &my_locale_typelib_ab_month_names_ru_UA, &my_locale_typelib_day_names_ru_UA, &my_locale_typelib_ab_day_names_ru_UA };
+/***** LOCALE END ru_UA *****/
+
+/***** LOCALE BEGIN sk_SK: Slovak - Slovakia *****/
+static const char *my_locale_month_names_sk_SK[13] =
+ {"január","február","marec","apríl","máj","jún","júl","august","september","október","november","december", NullS };
+static const char *my_locale_ab_month_names_sk_SK[13] =
+ {"jan","feb","mar","apr","máj","jún","júl","aug","sep","okt","nov","dec", NullS };
+static const char *my_locale_day_names_sk_SK[8] =
+ {"Pondelok","Utorok","Streda","Štvrtok","Piatok","Sobota","Nedeľa", NullS };
+static const char *my_locale_ab_day_names_sk_SK[8] =
+ {"Po","Ut","St","Å t","Pi","So","Ne", NullS };
+static TYPELIB my_locale_typelib_month_names_sk_SK =
+ { array_elements(my_locale_month_names_sk_SK)-1, "", my_locale_month_names_sk_SK, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_sk_SK =
+ { array_elements(my_locale_ab_month_names_sk_SK)-1, "", my_locale_ab_month_names_sk_SK, NULL };
+static TYPELIB my_locale_typelib_day_names_sk_SK =
+ { array_elements(my_locale_day_names_sk_SK)-1, "", my_locale_day_names_sk_SK, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_sk_SK =
+ { array_elements(my_locale_ab_day_names_sk_SK)-1, "", my_locale_ab_day_names_sk_SK, NULL };
+MY_LOCALE my_locale_sk_SK=
+ { "sk_SK", "Slovak - Slovakia", FALSE, &my_locale_typelib_month_names_sk_SK, &my_locale_typelib_ab_month_names_sk_SK, &my_locale_typelib_day_names_sk_SK, &my_locale_typelib_ab_day_names_sk_SK };
+/***** LOCALE END sk_SK *****/
+
+/***** LOCALE BEGIN sl_SI: Slovenian - Slovenia *****/
+static const char *my_locale_month_names_sl_SI[13] =
+ {"januar","februar","marec","april","maj","junij","julij","avgust","september","oktober","november","december", NullS };
+static const char *my_locale_ab_month_names_sl_SI[13] =
+ {"jan","feb","mar","apr","maj","jun","jul","avg","sep","okt","nov","dec", NullS };
+static const char *my_locale_day_names_sl_SI[8] =
+ {"ponedeljek","torek","sreda","Äetrtek","petek","sobota","nedelja", NullS };
+static const char *my_locale_ab_day_names_sl_SI[8] =
+ {"pon","tor","sre","Äet","pet","sob","ned", NullS };
+static TYPELIB my_locale_typelib_month_names_sl_SI =
+ { array_elements(my_locale_month_names_sl_SI)-1, "", my_locale_month_names_sl_SI, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_sl_SI =
+ { array_elements(my_locale_ab_month_names_sl_SI)-1, "", my_locale_ab_month_names_sl_SI, NULL };
+static TYPELIB my_locale_typelib_day_names_sl_SI =
+ { array_elements(my_locale_day_names_sl_SI)-1, "", my_locale_day_names_sl_SI, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_sl_SI =
+ { array_elements(my_locale_ab_day_names_sl_SI)-1, "", my_locale_ab_day_names_sl_SI, NULL };
+MY_LOCALE my_locale_sl_SI=
+ { "sl_SI", "Slovenian - Slovenia", FALSE, &my_locale_typelib_month_names_sl_SI, &my_locale_typelib_ab_month_names_sl_SI, &my_locale_typelib_day_names_sl_SI, &my_locale_typelib_ab_day_names_sl_SI };
+/***** LOCALE END sl_SI *****/
+
+/***** LOCALE BEGIN sq_AL: Albanian - Albania *****/
+static const char *my_locale_month_names_sq_AL[13] =
+ {"janar","shkurt","mars","prill","maj","qershor","korrik","gusht","shtator","tetor","nëntor","dhjetor", NullS };
+static const char *my_locale_ab_month_names_sq_AL[13] =
+ {"Jan","Shk","Mar","Pri","Maj","Qer","Kor","Gsh","Sht","Tet","Nën","Dhj", NullS };
+static const char *my_locale_day_names_sq_AL[8] =
+ {"e hënë ","e martë ","e mërkurë ","e enjte ","e premte ","e shtunë ","e diel ", NullS };
+static const char *my_locale_ab_day_names_sq_AL[8] =
+ {"Hën ","Mar ","Mër ","Enj ","Pre ","Sht ","Die ", NullS };
+static TYPELIB my_locale_typelib_month_names_sq_AL =
+ { array_elements(my_locale_month_names_sq_AL)-1, "", my_locale_month_names_sq_AL, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_sq_AL =
+ { array_elements(my_locale_ab_month_names_sq_AL)-1, "", my_locale_ab_month_names_sq_AL, NULL };
+static TYPELIB my_locale_typelib_day_names_sq_AL =
+ { array_elements(my_locale_day_names_sq_AL)-1, "", my_locale_day_names_sq_AL, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_sq_AL =
+ { array_elements(my_locale_ab_day_names_sq_AL)-1, "", my_locale_ab_day_names_sq_AL, NULL };
+MY_LOCALE my_locale_sq_AL=
+ { "sq_AL", "Albanian - Albania", FALSE, &my_locale_typelib_month_names_sq_AL, &my_locale_typelib_ab_month_names_sq_AL, &my_locale_typelib_day_names_sq_AL, &my_locale_typelib_ab_day_names_sq_AL };
+/***** LOCALE END sq_AL *****/
+
+/***** LOCALE BEGIN sr_YU: Servian - Yugoslavia *****/
+static const char *my_locale_month_names_sr_YU[13] =
+ {"januar","februar","mart","april","maj","juni","juli","avgust","septembar","oktobar","novembar","decembar", NullS };
+static const char *my_locale_ab_month_names_sr_YU[13] =
+ {"jan","feb","mar","apr","maj","jun","jul","avg","sep","okt","nov","dec", NullS };
+static const char *my_locale_day_names_sr_YU[8] =
+ {"ponedeljak","utorak","sreda","Äetvrtak","petak","subota","nedelja", NullS };
+static const char *my_locale_ab_day_names_sr_YU[8] =
+ {"pon","uto","sre","Äet","pet","sub","ned", NullS };
+static TYPELIB my_locale_typelib_month_names_sr_YU =
+ { array_elements(my_locale_month_names_sr_YU)-1, "", my_locale_month_names_sr_YU, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_sr_YU =
+ { array_elements(my_locale_ab_month_names_sr_YU)-1, "", my_locale_ab_month_names_sr_YU, NULL };
+static TYPELIB my_locale_typelib_day_names_sr_YU =
+ { array_elements(my_locale_day_names_sr_YU)-1, "", my_locale_day_names_sr_YU, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_sr_YU =
+ { array_elements(my_locale_ab_day_names_sr_YU)-1, "", my_locale_ab_day_names_sr_YU, NULL };
+MY_LOCALE my_locale_sr_YU=
+ { "sr_YU", "Servian - Yugoslavia", FALSE, &my_locale_typelib_month_names_sr_YU, &my_locale_typelib_ab_month_names_sr_YU, &my_locale_typelib_day_names_sr_YU, &my_locale_typelib_ab_day_names_sr_YU };
+/***** LOCALE END sr_YU *****/
+
+/***** LOCALE BEGIN sv_SE: Swedish - Sweden *****/
+static const char *my_locale_month_names_sv_SE[13] =
+ {"januari","februari","mars","april","maj","juni","juli","augusti","september","oktober","november","december", NullS };
+static const char *my_locale_ab_month_names_sv_SE[13] =
+ {"jan","feb","mar","apr","maj","jun","jul","aug","sep","okt","nov","dec", NullS };
+static const char *my_locale_day_names_sv_SE[8] =
+ {"måndag","tisdag","onsdag","torsdag","fredag","lördag","söndag", NullS };
+static const char *my_locale_ab_day_names_sv_SE[8] =
+ {"mån","tis","ons","tor","fre","lör","sön", NullS };
+static TYPELIB my_locale_typelib_month_names_sv_SE =
+ { array_elements(my_locale_month_names_sv_SE)-1, "", my_locale_month_names_sv_SE, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_sv_SE =
+ { array_elements(my_locale_ab_month_names_sv_SE)-1, "", my_locale_ab_month_names_sv_SE, NULL };
+static TYPELIB my_locale_typelib_day_names_sv_SE =
+ { array_elements(my_locale_day_names_sv_SE)-1, "", my_locale_day_names_sv_SE, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_sv_SE =
+ { array_elements(my_locale_ab_day_names_sv_SE)-1, "", my_locale_ab_day_names_sv_SE, NULL };
+MY_LOCALE my_locale_sv_SE=
+ { "sv_SE", "Swedish - Sweden", FALSE, &my_locale_typelib_month_names_sv_SE, &my_locale_typelib_ab_month_names_sv_SE, &my_locale_typelib_day_names_sv_SE, &my_locale_typelib_ab_day_names_sv_SE };
+/***** LOCALE END sv_SE *****/
+
+/***** LOCALE BEGIN ta_IN: Tamil - India *****/
+static const char *my_locale_month_names_ta_IN[13] =
+ {"ஜனவரி","பெபà¯à®°à®µà®°à®¿","மாரà¯à®šà¯","à®à®ªà¯à®°à®²à¯","மே","ஜூனà¯","ஜூலை","ஆகஸà¯à®Ÿà¯","செபà¯à®Ÿà®®à¯à®ªà®°à¯","அகà¯à®Ÿà¯‹à®ªà®°à¯","நவமà¯à®ªà®°à¯","டிசமà¯à®ªà®°à¯r", NullS };
+static const char *my_locale_ab_month_names_ta_IN[13] =
+ {"ஜனவரி","பெபà¯à®°à®µà®°à®¿","மாரà¯à®šà¯","à®à®ªà¯à®°à®²à¯","மே","ஜூனà¯","ஜூலை","ஆகஸà¯à®Ÿà¯","செபà¯à®Ÿà®®à¯à®ªà®°à¯","அகà¯à®Ÿà¯‹à®ªà®°à¯","நவமà¯à®ªà®°à¯","டிசமà¯à®ªà®°à¯r", NullS };
+static const char *my_locale_day_names_ta_IN[8] =
+ {"திஙà¯à®•à®³à¯","செவà¯à®µà®¾à®¯à¯","பà¯à®¤à®©à¯","வியாழனà¯","வெளà¯à®³à®¿","சனி","ஞாயிறà¯", NullS };
+static const char *my_locale_ab_day_names_ta_IN[8] =
+ {"த","ச","ப","வ","வ","ச","ஞ", NullS };
+static TYPELIB my_locale_typelib_month_names_ta_IN =
+ { array_elements(my_locale_month_names_ta_IN)-1, "", my_locale_month_names_ta_IN, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ta_IN =
+ { array_elements(my_locale_ab_month_names_ta_IN)-1, "", my_locale_ab_month_names_ta_IN, NULL };
+static TYPELIB my_locale_typelib_day_names_ta_IN =
+ { array_elements(my_locale_day_names_ta_IN)-1, "", my_locale_day_names_ta_IN, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ta_IN =
+ { array_elements(my_locale_ab_day_names_ta_IN)-1, "", my_locale_ab_day_names_ta_IN, NULL };
+MY_LOCALE my_locale_ta_IN=
+ { "ta_IN", "Tamil - India", FALSE, &my_locale_typelib_month_names_ta_IN, &my_locale_typelib_ab_month_names_ta_IN, &my_locale_typelib_day_names_ta_IN, &my_locale_typelib_ab_day_names_ta_IN };
+/***** LOCALE END ta_IN *****/
+
+/***** LOCALE BEGIN te_IN: Telugu - India *****/
+static const char *my_locale_month_names_te_IN[13] =
+ {"జనవరి","à°«à°¿à°¬à±à°°à°µà°°à°¿","మారà±à°šà°¿","à°à°ªà±à°°à°¿à°²à±","మే","జూనà±","జూలై","ఆగసà±à°Ÿà±","సెపà±à°Ÿà±†à°‚బరà±","à°…à°•à±à°Ÿà±‹à°¬à°°à±","నవంబరà±","డిసెంబరà±", NullS };
+static const char *my_locale_ab_month_names_te_IN[13] =
+ {"జనవరి","à°«à°¿à°¬à±à°°à°µà°°à°¿","మారà±à°šà°¿","à°à°ªà±à°°à°¿à°²à±","మే","జూనà±","జూలై","ఆగసà±à°Ÿà±","సెపà±à°Ÿà±†à°‚బరà±","à°…à°•à±à°Ÿà±‹à°¬à°°à±","నవంబరà±","డిసెంబరà±", NullS };
+static const char *my_locale_day_names_te_IN[8] =
+ {"సోమవారం","మంగళవారం","à°¬à±à°§à°µà°¾à°°à°‚","à°—à±à°°à±à°µà°¾à°°à°‚","à°¶à±à°•à±à°°à°µà°¾à°°à°‚","శనివారం","ఆదివారం", NullS };
+static const char *my_locale_ab_day_names_te_IN[8] =
+ {"సోమ","మంగళ","à°¬à±à°§","à°—à±à°°à±","à°¶à±à°•à±à°°","శని","ఆది", NullS };
+static TYPELIB my_locale_typelib_month_names_te_IN =
+ { array_elements(my_locale_month_names_te_IN)-1, "", my_locale_month_names_te_IN, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_te_IN =
+ { array_elements(my_locale_ab_month_names_te_IN)-1, "", my_locale_ab_month_names_te_IN, NULL };
+static TYPELIB my_locale_typelib_day_names_te_IN =
+ { array_elements(my_locale_day_names_te_IN)-1, "", my_locale_day_names_te_IN, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_te_IN =
+ { array_elements(my_locale_ab_day_names_te_IN)-1, "", my_locale_ab_day_names_te_IN, NULL };
+MY_LOCALE my_locale_te_IN=
+ { "te_IN", "Telugu - India", FALSE, &my_locale_typelib_month_names_te_IN, &my_locale_typelib_ab_month_names_te_IN, &my_locale_typelib_day_names_te_IN, &my_locale_typelib_ab_day_names_te_IN };
+/***** LOCALE END te_IN *****/
+
+/***** LOCALE BEGIN th_TH: Thai - Thailand *****/
+static const char *my_locale_month_names_th_TH[13] =
+ {"มà¸à¸£à¸²à¸„ม","à¸à¸¸à¸¡à¸ à¸²à¸žà¸±à¸™à¸˜à¹Œ","มีนาคม","เมษายน","พฤษภาคม","มิถุนายน","à¸à¸£à¸à¸Žà¸²à¸„ม","สิงหาคม","à¸à¸±à¸™à¸¢à¸²à¸¢à¸™","ตุลาคม","พฤศจิà¸à¸²à¸¢à¸™","ธันวาคม", NullS };
+static const char *my_locale_ab_month_names_th_TH[13] =
+ {"ม.ค.","à¸.พ.","มี.ค.","เม.ย.","พ.ค.","มิ.ย.","à¸.ค.","ส.ค.","à¸.ย.","ต.ค.","พ.ย.","ธ.ค.", NullS };
+static const char *my_locale_day_names_th_TH[8] =
+ {"จันทร์","อังคาร","พุธ","พฤหัสบดี","ศุà¸à¸£à¹Œ","เสาร์","อาทิตย์", NullS };
+static const char *my_locale_ab_day_names_th_TH[8] =
+ {"จ.","อ.","พ.","พฤ.","ศ.","ส.","อา.", NullS };
+static TYPELIB my_locale_typelib_month_names_th_TH =
+ { array_elements(my_locale_month_names_th_TH)-1, "", my_locale_month_names_th_TH, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_th_TH =
+ { array_elements(my_locale_ab_month_names_th_TH)-1, "", my_locale_ab_month_names_th_TH, NULL };
+static TYPELIB my_locale_typelib_day_names_th_TH =
+ { array_elements(my_locale_day_names_th_TH)-1, "", my_locale_day_names_th_TH, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_th_TH =
+ { array_elements(my_locale_ab_day_names_th_TH)-1, "", my_locale_ab_day_names_th_TH, NULL };
+MY_LOCALE my_locale_th_TH=
+ { "th_TH", "Thai - Thailand", FALSE, &my_locale_typelib_month_names_th_TH, &my_locale_typelib_ab_month_names_th_TH, &my_locale_typelib_day_names_th_TH, &my_locale_typelib_ab_day_names_th_TH };
+/***** LOCALE END th_TH *****/
+
+/***** LOCALE BEGIN tr_TR: Turkish - Turkey *****/
+static const char *my_locale_month_names_tr_TR[13] =
+ {"Ocak","Şubat","Mart","Nisan","Mayıs","Haziran","Temmuz","Ağustos","Eylül","Ekim","Kasım","Aralık", NullS };
+static const char *my_locale_ab_month_names_tr_TR[13] =
+ {"Oca","Åžub","Mar","Nis","May","Haz","Tem","AÄŸu","Eyl","Eki","Kas","Ara", NullS };
+static const char *my_locale_day_names_tr_TR[8] =
+ {"Pazartesi","Salı","Çarşamba","Perşembe","Cuma","Cumartesi","Pazar", NullS };
+static const char *my_locale_ab_day_names_tr_TR[8] =
+ {"Pzt","Sal","Çrş","Prş","Cum","Cts","Paz", NullS };
+static TYPELIB my_locale_typelib_month_names_tr_TR =
+ { array_elements(my_locale_month_names_tr_TR)-1, "", my_locale_month_names_tr_TR, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_tr_TR =
+ { array_elements(my_locale_ab_month_names_tr_TR)-1, "", my_locale_ab_month_names_tr_TR, NULL };
+static TYPELIB my_locale_typelib_day_names_tr_TR =
+ { array_elements(my_locale_day_names_tr_TR)-1, "", my_locale_day_names_tr_TR, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_tr_TR =
+ { array_elements(my_locale_ab_day_names_tr_TR)-1, "", my_locale_ab_day_names_tr_TR, NULL };
+MY_LOCALE my_locale_tr_TR=
+ { "tr_TR", "Turkish - Turkey", FALSE, &my_locale_typelib_month_names_tr_TR, &my_locale_typelib_ab_month_names_tr_TR, &my_locale_typelib_day_names_tr_TR, &my_locale_typelib_ab_day_names_tr_TR };
+/***** LOCALE END tr_TR *****/
+
+/***** LOCALE BEGIN uk_UA: Ukrainian - Ukraine *****/
+static const char *my_locale_month_names_uk_UA[13] =
+ {"Січень","Лютий","Березень","Квітень","Травень","Червень","Липень","Серпень","ВереÑень","Жовтень","ЛиÑтопад","Грудень", NullS };
+static const char *my_locale_ab_month_names_uk_UA[13] =
+ {"Січ","Лют","Бер","Кві","Тра","Чер","Лип","Сер","Вер","Жов","ЛиÑ","Гру", NullS };
+static const char *my_locale_day_names_uk_UA[8] =
+ {"Понеділок","Вівторок","Середа","Четвер","П'ÑтницÑ","Субота","ÐеділÑ", NullS };
+static const char *my_locale_ab_day_names_uk_UA[8] =
+ {"Пнд","Ð’Ñ‚Ñ€","Срд","Чтв","Птн","Сбт","Ðдл", NullS };
+static TYPELIB my_locale_typelib_month_names_uk_UA =
+ { array_elements(my_locale_month_names_uk_UA)-1, "", my_locale_month_names_uk_UA, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_uk_UA =
+ { array_elements(my_locale_ab_month_names_uk_UA)-1, "", my_locale_ab_month_names_uk_UA, NULL };
+static TYPELIB my_locale_typelib_day_names_uk_UA =
+ { array_elements(my_locale_day_names_uk_UA)-1, "", my_locale_day_names_uk_UA, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_uk_UA =
+ { array_elements(my_locale_ab_day_names_uk_UA)-1, "", my_locale_ab_day_names_uk_UA, NULL };
+MY_LOCALE my_locale_uk_UA=
+ { "uk_UA", "Ukrainian - Ukraine", FALSE, &my_locale_typelib_month_names_uk_UA, &my_locale_typelib_ab_month_names_uk_UA, &my_locale_typelib_day_names_uk_UA, &my_locale_typelib_ab_day_names_uk_UA };
+/***** LOCALE END uk_UA *****/
+
+/***** LOCALE BEGIN ur_PK: Urdu - Pakistan *****/
+static const char *my_locale_month_names_ur_PK[13] =
+ {"جنوري","Ùروري","مارچ","اپريل","مٓی","جون","جولاي","اگست","ستمبر","اكتوبر","نومبر","دسمبر", NullS };
+static const char *my_locale_ab_month_names_ur_PK[13] =
+ {"جنوري","Ùروري","مارچ","اپريل","مٓی","جون","جولاي","اگست","ستمبر","اكتوبر","نومبر","دسمبر", NullS };
+static const char *my_locale_day_names_ur_PK[8] =
+ {"پير","منگل","بدھ","جمعرات","جمعه","Ù‡Ùته","اتوار", NullS };
+static const char *my_locale_ab_day_names_ur_PK[8] =
+ {"پير","منگل","بدھ","جمعرات","جمعه","Ù‡Ùته","اتوار", NullS };
+static TYPELIB my_locale_typelib_month_names_ur_PK =
+ { array_elements(my_locale_month_names_ur_PK)-1, "", my_locale_month_names_ur_PK, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_ur_PK =
+ { array_elements(my_locale_ab_month_names_ur_PK)-1, "", my_locale_ab_month_names_ur_PK, NULL };
+static TYPELIB my_locale_typelib_day_names_ur_PK =
+ { array_elements(my_locale_day_names_ur_PK)-1, "", my_locale_day_names_ur_PK, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_ur_PK =
+ { array_elements(my_locale_ab_day_names_ur_PK)-1, "", my_locale_ab_day_names_ur_PK, NULL };
+MY_LOCALE my_locale_ur_PK=
+ { "ur_PK", "Urdu - Pakistan", FALSE, &my_locale_typelib_month_names_ur_PK, &my_locale_typelib_ab_month_names_ur_PK, &my_locale_typelib_day_names_ur_PK, &my_locale_typelib_ab_day_names_ur_PK };
+/***** LOCALE END ur_PK *****/
+
+/***** LOCALE BEGIN vi_VN: Vietnamese - Vietnam *****/
+static const char *my_locale_month_names_vi_VN[13] =
+ {"Tháng một","Tháng hai","Tháng ba","Tháng tư","Tháng năm","Tháng sáu","Tháng bảy","Tháng tám","Tháng chín","Tháng mười","Tháng mười một","Tháng mười hai", NullS };
+static const char *my_locale_ab_month_names_vi_VN[13] =
+ {"Thg 1","Thg 2","Thg 3","Thg 4","Thg 5","Thg 6","Thg 7","Thg 8","Thg 9","Thg 10","Thg 11","Thg 12", NullS };
+static const char *my_locale_day_names_vi_VN[8] =
+ {"ThÆ°Ì hai ","ThÆ°Ì ba ","ThÆ°Ì tÆ° ","ThÆ°Ì năm ","ThÆ°Ì sáu ","ThÆ°Ì bảy ","Chủ nhật ", NullS };
+static const char *my_locale_ab_day_names_vi_VN[8] =
+ {"Th 2 ","Th 3 ","Th 4 ","Th 5 ","Th 6 ","Th 7 ","CN ", NullS };
+static TYPELIB my_locale_typelib_month_names_vi_VN =
+ { array_elements(my_locale_month_names_vi_VN)-1, "", my_locale_month_names_vi_VN, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_vi_VN =
+ { array_elements(my_locale_ab_month_names_vi_VN)-1, "", my_locale_ab_month_names_vi_VN, NULL };
+static TYPELIB my_locale_typelib_day_names_vi_VN =
+ { array_elements(my_locale_day_names_vi_VN)-1, "", my_locale_day_names_vi_VN, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_vi_VN =
+ { array_elements(my_locale_ab_day_names_vi_VN)-1, "", my_locale_ab_day_names_vi_VN, NULL };
+MY_LOCALE my_locale_vi_VN=
+ { "vi_VN", "Vietnamese - Vietnam", FALSE, &my_locale_typelib_month_names_vi_VN, &my_locale_typelib_ab_month_names_vi_VN, &my_locale_typelib_day_names_vi_VN, &my_locale_typelib_ab_day_names_vi_VN };
+/***** LOCALE END vi_VN *****/
+
+/***** LOCALE BEGIN zh_CN: Chinese - Peoples Republic of China *****/
+static const char *my_locale_month_names_zh_CN[13] =
+ {"一月","二月","三月","四月","五月","六月","七月","八月","ä¹æœˆ","å月","å一月","å二月", NullS };
+static const char *my_locale_ab_month_names_zh_CN[13] =
+ {" 1月"," 2月"," 3月"," 4月"," 5月"," 6月"," 7月"," 8月"," 9月","10月","11月","12月", NullS };
+static const char *my_locale_day_names_zh_CN[8] =
+ {"星期一","星期二","星期三","星期四","星期五","星期六","星期日", NullS };
+static const char *my_locale_ab_day_names_zh_CN[8] =
+ {"一","二","三","四","五","六","日", NullS };
+static TYPELIB my_locale_typelib_month_names_zh_CN =
+ { array_elements(my_locale_month_names_zh_CN)-1, "", my_locale_month_names_zh_CN, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_zh_CN =
+ { array_elements(my_locale_ab_month_names_zh_CN)-1, "", my_locale_ab_month_names_zh_CN, NULL };
+static TYPELIB my_locale_typelib_day_names_zh_CN =
+ { array_elements(my_locale_day_names_zh_CN)-1, "", my_locale_day_names_zh_CN, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_zh_CN =
+ { array_elements(my_locale_ab_day_names_zh_CN)-1, "", my_locale_ab_day_names_zh_CN, NULL };
+MY_LOCALE my_locale_zh_CN=
+ { "zh_CN", "Chinese - Peoples Republic of China", FALSE, &my_locale_typelib_month_names_zh_CN, &my_locale_typelib_ab_month_names_zh_CN, &my_locale_typelib_day_names_zh_CN, &my_locale_typelib_ab_day_names_zh_CN };
+/***** LOCALE END zh_CN *****/
+
+/***** LOCALE BEGIN zh_TW: Chinese - Taiwan *****/
+static const char *my_locale_month_names_zh_TW[13] =
+ {"一月","二月","三月","四月","五月","六月","七月","八月","ä¹æœˆ","å月","å一月","å二月", NullS };
+static const char *my_locale_ab_month_names_zh_TW[13] =
+ {" 1月"," 2月"," 3月"," 4月"," 5月"," 6月"," 7月"," 8月"," 9月","10月","11月","12月", NullS };
+static const char *my_locale_day_names_zh_TW[8] =
+ {"週一","週二","週三","週四","週五","週六","週日", NullS };
+static const char *my_locale_ab_day_names_zh_TW[8] =
+ {"一","二","三","四","五","六","日", NullS };
+static TYPELIB my_locale_typelib_month_names_zh_TW =
+ { array_elements(my_locale_month_names_zh_TW)-1, "", my_locale_month_names_zh_TW, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_zh_TW =
+ { array_elements(my_locale_ab_month_names_zh_TW)-1, "", my_locale_ab_month_names_zh_TW, NULL };
+static TYPELIB my_locale_typelib_day_names_zh_TW =
+ { array_elements(my_locale_day_names_zh_TW)-1, "", my_locale_day_names_zh_TW, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_zh_TW =
+ { array_elements(my_locale_ab_day_names_zh_TW)-1, "", my_locale_ab_day_names_zh_TW, NULL };
+MY_LOCALE my_locale_zh_TW=
+ { "zh_TW", "Chinese - Taiwan", FALSE, &my_locale_typelib_month_names_zh_TW, &my_locale_typelib_ab_month_names_zh_TW, &my_locale_typelib_day_names_zh_TW, &my_locale_typelib_ab_day_names_zh_TW };
+/***** LOCALE END zh_TW *****/
+
+/***** LOCALE BEGIN ar_DZ: Arabic - Algeria *****/
+MY_LOCALE my_locale_ar_DZ=
+ { "ar_DZ", "Arabic - Algeria", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_DZ *****/
+
+/***** LOCALE BEGIN ar_EG: Arabic - Egypt *****/
+MY_LOCALE my_locale_ar_EG=
+ { "ar_EG", "Arabic - Egypt", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_EG *****/
+
+/***** LOCALE BEGIN ar_IN: Arabic - Iran *****/
+MY_LOCALE my_locale_ar_IN=
+ { "ar_IN", "Arabic - Iran", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_IN *****/
+
+/***** LOCALE BEGIN ar_IQ: Arabic - Iraq *****/
+MY_LOCALE my_locale_ar_IQ=
+ { "ar_IQ", "Arabic - Iraq", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_IQ *****/
+
+/***** LOCALE BEGIN ar_KW: Arabic - Kuwait *****/
+MY_LOCALE my_locale_ar_KW=
+ { "ar_KW", "Arabic - Kuwait", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_KW *****/
+
+/***** LOCALE BEGIN ar_LB: Arabic - Lebanon *****/
+MY_LOCALE my_locale_ar_LB=
+ { "ar_LB", "Arabic - Lebanon", FALSE, &my_locale_typelib_month_names_ar_JO, &my_locale_typelib_ab_month_names_ar_JO, &my_locale_typelib_day_names_ar_JO, &my_locale_typelib_ab_day_names_ar_JO };
+/***** LOCALE END ar_LB *****/
+
+/***** LOCALE BEGIN ar_LY: Arabic - Libya *****/
+MY_LOCALE my_locale_ar_LY=
+ { "ar_LY", "Arabic - Libya", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_LY *****/
+
+/***** LOCALE BEGIN ar_MA: Arabic - Morocco *****/
+MY_LOCALE my_locale_ar_MA=
+ { "ar_MA", "Arabic - Morocco", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_MA *****/
+
+/***** LOCALE BEGIN ar_OM: Arabic - Oman *****/
+MY_LOCALE my_locale_ar_OM=
+ { "ar_OM", "Arabic - Oman", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_OM *****/
+
+/***** LOCALE BEGIN ar_QA: Arabic - Qatar *****/
+MY_LOCALE my_locale_ar_QA=
+ { "ar_QA", "Arabic - Qatar", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_QA *****/
+
+/***** LOCALE BEGIN ar_SD: Arabic - Sudan *****/
+MY_LOCALE my_locale_ar_SD=
+ { "ar_SD", "Arabic - Sudan", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_SD *****/
+
+/***** LOCALE BEGIN ar_TN: Arabic - Tunisia *****/
+MY_LOCALE my_locale_ar_TN=
+ { "ar_TN", "Arabic - Tunisia", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_TN *****/
+
+/***** LOCALE BEGIN ar_YE: Arabic - Yemen *****/
+MY_LOCALE my_locale_ar_YE=
+ { "ar_YE", "Arabic - Yemen", FALSE, &my_locale_typelib_month_names_ar_BH, &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH };
+/***** LOCALE END ar_YE *****/
+
+/***** LOCALE BEGIN de_BE: German - Belgium *****/
+MY_LOCALE my_locale_de_BE=
+ { "de_BE", "German - Belgium", FALSE, &my_locale_typelib_month_names_de_DE, &my_locale_typelib_ab_month_names_de_DE, &my_locale_typelib_day_names_de_DE, &my_locale_typelib_ab_day_names_de_DE };
+/***** LOCALE END de_BE *****/
+
+/***** LOCALE BEGIN de_CH: German - Switzerland *****/
+MY_LOCALE my_locale_de_CH=
+ { "de_CH", "German - Switzerland", FALSE, &my_locale_typelib_month_names_de_DE, &my_locale_typelib_ab_month_names_de_DE, &my_locale_typelib_day_names_de_DE, &my_locale_typelib_ab_day_names_de_DE };
+/***** LOCALE END de_CH *****/
+
+/***** LOCALE BEGIN de_LU: German - Luxembourg *****/
+MY_LOCALE my_locale_de_LU=
+ { "de_LU", "German - Luxembourg", FALSE, &my_locale_typelib_month_names_de_DE, &my_locale_typelib_ab_month_names_de_DE, &my_locale_typelib_day_names_de_DE, &my_locale_typelib_ab_day_names_de_DE };
+/***** LOCALE END de_LU *****/
+
+/***** LOCALE BEGIN en_AU: English - Australia *****/
+MY_LOCALE my_locale_en_AU=
+ { "en_AU", "English - Australia", TRUE, &my_locale_typelib_month_names_en_US, &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US };
+/***** LOCALE END en_AU *****/
+
+/***** LOCALE BEGIN en_CA: English - Canada *****/
+MY_LOCALE my_locale_en_CA=
+ { "en_CA", "English - Canada", TRUE, &my_locale_typelib_month_names_en_US, &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US };
+/***** LOCALE END en_CA *****/
+
+/***** LOCALE BEGIN en_GB: English - United Kingdom *****/
+MY_LOCALE my_locale_en_GB=
+ { "en_GB", "English - United Kingdom", TRUE, &my_locale_typelib_month_names_en_US, &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US };
+/***** LOCALE END en_GB *****/
+
+/***** LOCALE BEGIN en_IN: English - India *****/
+MY_LOCALE my_locale_en_IN=
+ { "en_IN", "English - India", TRUE, &my_locale_typelib_month_names_en_US, &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US };
+/***** LOCALE END en_IN *****/
+
+/***** LOCALE BEGIN en_NZ: English - New Zealand *****/
+MY_LOCALE my_locale_en_NZ=
+ { "en_NZ", "English - New Zealand", TRUE, &my_locale_typelib_month_names_en_US, &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US };
+/***** LOCALE END en_NZ *****/
+
+/***** LOCALE BEGIN en_PH: English - Philippines *****/
+MY_LOCALE my_locale_en_PH=
+ { "en_PH", "English - Philippines", TRUE, &my_locale_typelib_month_names_en_US, &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US };
+/***** LOCALE END en_PH *****/
+
+/***** LOCALE BEGIN en_ZA: English - South Africa *****/
+MY_LOCALE my_locale_en_ZA=
+ { "en_ZA", "English - South Africa", TRUE, &my_locale_typelib_month_names_en_US, &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US };
+/***** LOCALE END en_ZA *****/
+
+/***** LOCALE BEGIN en_ZW: English - Zimbabwe *****/
+MY_LOCALE my_locale_en_ZW=
+ { "en_ZW", "English - Zimbabwe", TRUE, &my_locale_typelib_month_names_en_US, &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US };
+/***** LOCALE END en_ZW *****/
+
+/***** LOCALE BEGIN es_AR: Spanish - Argentina *****/
+MY_LOCALE my_locale_es_AR=
+ { "es_AR", "Spanish - Argentina", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_AR *****/
+
+/***** LOCALE BEGIN es_BO: Spanish - Bolivia *****/
+MY_LOCALE my_locale_es_BO=
+ { "es_BO", "Spanish - Bolivia", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_BO *****/
+
+/***** LOCALE BEGIN es_CL: Spanish - Chile *****/
+MY_LOCALE my_locale_es_CL=
+ { "es_CL", "Spanish - Chile", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_CL *****/
+
+/***** LOCALE BEGIN es_CO: Spanish - Columbia *****/
+MY_LOCALE my_locale_es_CO=
+ { "es_CO", "Spanish - Columbia", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_CO *****/
+
+/***** LOCALE BEGIN es_CR: Spanish - Costa Rica *****/
+MY_LOCALE my_locale_es_CR=
+ { "es_CR", "Spanish - Costa Rica", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_CR *****/
+
+/***** LOCALE BEGIN es_DO: Spanish - Dominican Republic *****/
+MY_LOCALE my_locale_es_DO=
+ { "es_DO", "Spanish - Dominican Republic", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_DO *****/
+
+/***** LOCALE BEGIN es_EC: Spanish - Ecuador *****/
+MY_LOCALE my_locale_es_EC=
+ { "es_EC", "Spanish - Ecuador", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_EC *****/
+
+/***** LOCALE BEGIN es_GT: Spanish - Guatemala *****/
+MY_LOCALE my_locale_es_GT=
+ { "es_GT", "Spanish - Guatemala", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_GT *****/
+
+/***** LOCALE BEGIN es_HN: Spanish - Honduras *****/
+MY_LOCALE my_locale_es_HN=
+ { "es_HN", "Spanish - Honduras", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_HN *****/
+
+/***** LOCALE BEGIN es_MX: Spanish - Mexico *****/
+MY_LOCALE my_locale_es_MX=
+ { "es_MX", "Spanish - Mexico", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_MX *****/
+
+/***** LOCALE BEGIN es_NI: Spanish - Nicaragua *****/
+MY_LOCALE my_locale_es_NI=
+ { "es_NI", "Spanish - Nicaragua", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_NI *****/
+
+/***** LOCALE BEGIN es_PA: Spanish - Panama *****/
+MY_LOCALE my_locale_es_PA=
+ { "es_PA", "Spanish - Panama", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_PA *****/
+
+/***** LOCALE BEGIN es_PE: Spanish - Peru *****/
+MY_LOCALE my_locale_es_PE=
+ { "es_PE", "Spanish - Peru", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_PE *****/
+
+/***** LOCALE BEGIN es_PR: Spanish - Puerto Rico *****/
+MY_LOCALE my_locale_es_PR=
+ { "es_PR", "Spanish - Puerto Rico", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_PR *****/
+
+/***** LOCALE BEGIN es_PY: Spanish - Paraguay *****/
+MY_LOCALE my_locale_es_PY=
+ { "es_PY", "Spanish - Paraguay", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_PY *****/
+
+/***** LOCALE BEGIN es_SV: Spanish - El Salvador *****/
+MY_LOCALE my_locale_es_SV=
+ { "es_SV", "Spanish - El Salvador", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_SV *****/
+
+/***** LOCALE BEGIN es_US: Spanish - United States *****/
+MY_LOCALE my_locale_es_US=
+ { "es_US", "Spanish - United States", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_US *****/
+
+/***** LOCALE BEGIN es_UY: Spanish - Uruguay *****/
+MY_LOCALE my_locale_es_UY=
+ { "es_UY", "Spanish - Uruguay", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_UY *****/
+
+/***** LOCALE BEGIN es_VE: Spanish - Venezuela *****/
+MY_LOCALE my_locale_es_VE=
+ { "es_VE", "Spanish - Venezuela", FALSE, &my_locale_typelib_month_names_es_ES, &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES };
+/***** LOCALE END es_VE *****/
+
+/***** LOCALE BEGIN fr_BE: French - Belgium *****/
+MY_LOCALE my_locale_fr_BE=
+ { "fr_BE", "French - Belgium", FALSE, &my_locale_typelib_month_names_fr_FR, &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR };
+/***** LOCALE END fr_BE *****/
+
+/***** LOCALE BEGIN fr_CA: French - Canada *****/
+MY_LOCALE my_locale_fr_CA=
+ { "fr_CA", "French - Canada", FALSE, &my_locale_typelib_month_names_fr_FR, &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR };
+/***** LOCALE END fr_CA *****/
+
+/***** LOCALE BEGIN fr_CH: French - Switzerland *****/
+MY_LOCALE my_locale_fr_CH=
+ { "fr_CH", "French - Switzerland", FALSE, &my_locale_typelib_month_names_fr_FR, &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR };
+/***** LOCALE END fr_CH *****/
+
+/***** LOCALE BEGIN fr_LU: French - Luxembourg *****/
+MY_LOCALE my_locale_fr_LU=
+ { "fr_LU", "French - Luxembourg", FALSE, &my_locale_typelib_month_names_fr_FR, &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR };
+/***** LOCALE END fr_LU *****/
+
+/***** LOCALE BEGIN it_IT: Italian - Italy *****/
+MY_LOCALE my_locale_it_IT=
+ { "it_IT", "Italian - Italy", FALSE, &my_locale_typelib_month_names_it_CH, &my_locale_typelib_ab_month_names_it_CH, &my_locale_typelib_day_names_it_CH, &my_locale_typelib_ab_day_names_it_CH };
+/***** LOCALE END it_IT *****/
+
+/***** LOCALE BEGIN nl_BE: Dutch - Belgium *****/
+MY_LOCALE my_locale_nl_BE=
+ { "nl_BE", "Dutch - Belgium", TRUE, &my_locale_typelib_month_names_nl_NL, &my_locale_typelib_ab_month_names_nl_NL, &my_locale_typelib_day_names_nl_NL, &my_locale_typelib_ab_day_names_nl_NL };
+/***** LOCALE END nl_BE *****/
+
+/***** LOCALE BEGIN no_NO: Norwegian - Norway *****/
+MY_LOCALE my_locale_no_NO=
+ { "no_NO", "Norwegian - Norway", FALSE, &my_locale_typelib_month_names_nb_NO, &my_locale_typelib_ab_month_names_nb_NO, &my_locale_typelib_day_names_nb_NO, &my_locale_typelib_ab_day_names_nb_NO };
+/***** LOCALE END no_NO *****/
+
+/***** LOCALE BEGIN sv_FI: Swedish - Finland *****/
+MY_LOCALE my_locale_sv_FI=
+ { "sv_FI", "Swedish - Finland", FALSE, &my_locale_typelib_month_names_sv_SE, &my_locale_typelib_ab_month_names_sv_SE, &my_locale_typelib_day_names_sv_SE, &my_locale_typelib_ab_day_names_sv_SE };
+/***** LOCALE END sv_FI *****/
+
+/***** LOCALE BEGIN zh_HK: Chinese - Hong Kong SAR *****/
+MY_LOCALE my_locale_zh_HK=
+ { "zh_HK", "Chinese - Hong Kong SAR", FALSE, &my_locale_typelib_month_names_zh_CN, &my_locale_typelib_ab_month_names_zh_CN, &my_locale_typelib_day_names_zh_CN, &my_locale_typelib_ab_day_names_zh_CN };
+/***** LOCALE END zh_HK *****/
+
+MY_LOCALE *my_locales[]=
+ {
+ &my_locale_en_US,
+ &my_locale_en_GB,
+ &my_locale_ja_JP,
+ &my_locale_sv_SE,
+ &my_locale_de_DE,
+ &my_locale_fr_FR,
+ &my_locale_ar_AE,
+ &my_locale_ar_BH,
+ &my_locale_ar_JO,
+ &my_locale_ar_SA,
+ &my_locale_ar_SY,
+ &my_locale_be_BY,
+ &my_locale_bg_BG,
+ &my_locale_ca_ES,
+ &my_locale_cs_CZ,
+ &my_locale_da_DK,
+ &my_locale_de_AT,
+ &my_locale_es_ES,
+ &my_locale_et_EE,
+ &my_locale_eu_ES,
+ &my_locale_fi_FI,
+ &my_locale_fo_FO,
+ &my_locale_gl_ES,
+ &my_locale_gu_IN,
+ &my_locale_he_IL,
+ &my_locale_hi_IN,
+ &my_locale_hr_HR,
+ &my_locale_hu_HU,
+ &my_locale_id_ID,
+ &my_locale_is_IS,
+ &my_locale_it_CH,
+ &my_locale_ko_KR,
+ &my_locale_lt_LT,
+ &my_locale_lv_LV,
+ &my_locale_mk_MK,
+ &my_locale_mn_MN,
+ &my_locale_ms_MY,
+ &my_locale_nb_NO,
+ &my_locale_nl_NL,
+ &my_locale_pl_PL,
+ &my_locale_pt_BR,
+ &my_locale_pt_PT,
+ &my_locale_ro_RO,
+ &my_locale_ru_RU,
+ &my_locale_ru_UA,
+ &my_locale_sk_SK,
+ &my_locale_sl_SI,
+ &my_locale_sq_AL,
+ &my_locale_sr_YU,
+ &my_locale_ta_IN,
+ &my_locale_te_IN,
+ &my_locale_th_TH,
+ &my_locale_tr_TR,
+ &my_locale_uk_UA,
+ &my_locale_ur_PK,
+ &my_locale_vi_VN,
+ &my_locale_zh_CN,
+ &my_locale_zh_TW,
+ &my_locale_ar_DZ,
+ &my_locale_ar_EG,
+ &my_locale_ar_IN,
+ &my_locale_ar_IQ,
+ &my_locale_ar_KW,
+ &my_locale_ar_LB,
+ &my_locale_ar_LY,
+ &my_locale_ar_MA,
+ &my_locale_ar_OM,
+ &my_locale_ar_QA,
+ &my_locale_ar_SD,
+ &my_locale_ar_TN,
+ &my_locale_ar_YE,
+ &my_locale_de_BE,
+ &my_locale_de_CH,
+ &my_locale_de_LU,
+ &my_locale_en_AU,
+ &my_locale_en_CA,
+ &my_locale_en_IN,
+ &my_locale_en_NZ,
+ &my_locale_en_PH,
+ &my_locale_en_ZA,
+ &my_locale_en_ZW,
+ &my_locale_es_AR,
+ &my_locale_es_BO,
+ &my_locale_es_CL,
+ &my_locale_es_CO,
+ &my_locale_es_CR,
+ &my_locale_es_DO,
+ &my_locale_es_EC,
+ &my_locale_es_GT,
+ &my_locale_es_HN,
+ &my_locale_es_MX,
+ &my_locale_es_NI,
+ &my_locale_es_PA,
+ &my_locale_es_PE,
+ &my_locale_es_PR,
+ &my_locale_es_PY,
+ &my_locale_es_SV,
+ &my_locale_es_US,
+ &my_locale_es_UY,
+ &my_locale_es_VE,
+ &my_locale_fr_BE,
+ &my_locale_fr_CA,
+ &my_locale_fr_CH,
+ &my_locale_fr_LU,
+ &my_locale_it_IT,
+ &my_locale_nl_BE,
+ &my_locale_no_NO,
+ &my_locale_sv_FI,
+ &my_locale_zh_HK,
+ NULL
+ };
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 232df095816..30dea60f7a2 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -64,9 +64,8 @@ extern "C" int gethostname(char *name, int namelen);
static void time_out_user_resource_limits(THD *thd, USER_CONN *uc);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
static int check_for_max_user_connections(THD *thd, USER_CONN *uc);
-#endif
static void decrease_user_connections(USER_CONN *uc);
-static bool check_db_used(THD *thd,TABLE_LIST *tables);
+#endif /* NO_EMBEDDED_ACCESS_CHECKS */
static bool check_multi_update_lock(THD *thd);
static void remove_escape(char *name);
static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables);
@@ -111,8 +110,6 @@ const char *xa_state_names[]={
"NON-EXISTING", "ACTIVE", "IDLE", "PREPARED"
};
-static char empty_c_string[1]= {0}; // Used for not defined 'db'
-
#ifdef __WIN__
static void test_signal(int sig_ptr)
{
@@ -221,6 +218,7 @@ static bool some_non_temp_table_to_be_updated(THD *thd, TABLE_LIST *tables)
return 0;
}
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
static HASH hash_user_connections;
static int get_or_create_user_conn(THD *thd, const char *user,
@@ -272,6 +270,7 @@ end:
return return_val;
}
+#endif /* !NO_EMBEDDED_ACCESS_CHECKS */
/*
@@ -315,14 +314,11 @@ int check_user(THD *thd, enum enum_server_command command,
thd->db is saved in caller and needs to be freed by caller if this
function returns 0
*/
- thd->db= 0;
- thd->db_length= 0;
+ thd->reset_db(NULL, 0);
if (mysql_change_db(thd, db, FALSE))
{
/* Send the error to the client */
net_send_error(thd);
- if (thd->user_connect)
- decrease_user_connections(thd->user_connect);
DBUG_RETURN(-1);
}
}
@@ -356,9 +352,8 @@ int check_user(THD *thd, enum enum_server_command command,
if connect failed. Also in case of 'CHANGE USER' failure, current
database will be switched to 'no database selected'.
*/
- thd->db= 0;
- thd->db_length= 0;
-
+ thd->reset_db(NULL, 0);
+
USER_RESOURCES ur;
int res= acl_getroot(thd, &ur, passwd, passwd_len);
#ifndef EMBEDDED_LIBRARY
@@ -514,10 +509,12 @@ extern "C" void free_user(struct user_conn *uc)
void init_max_user_conn(void)
{
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
(void) hash_init(&hash_user_connections,system_charset_info,max_connections,
0,0,
(hash_get_key) get_key_conn, (hash_free_key) free_user,
0);
+#endif
}
@@ -580,7 +577,6 @@ static int check_for_max_user_connections(THD *thd, USER_CONN *uc)
(void) pthread_mutex_unlock(&LOCK_user_conn);
DBUG_RETURN(error);
}
-#endif /* NO_EMBEDDED_ACCESS_CHECKS */
/*
Decrease user connection count
@@ -614,13 +610,18 @@ static void decrease_user_connections(USER_CONN *uc)
DBUG_VOID_RETURN;
}
+#endif /* NO_EMBEDDED_ACCESS_CHECKS */
+
void free_max_user_conn(void)
{
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
hash_free(&hash_user_connections);
+#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
+
/*
Mark all commands that somehow changes a table
This is used to check number of updates / hour
@@ -823,6 +824,37 @@ static void reset_mqh(LEX_USER *lu, bool get_them= 0)
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
+void thd_init_client_charset(THD *thd, uint cs_number)
+{
+ /*
+ Use server character set and collation if
+ - opt_character_set_client_handshake is not set
+ - client has not specified a character set
+ - client character set is the same as the server's
+ - client character set doesn't exist in the server
+ */
+ if (!opt_character_set_client_handshake ||
+ !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) ||
+ !my_strcasecmp(&my_charset_latin1,
+ global_system_variables.character_set_client->name,
+ thd->variables.character_set_client->name))
+ {
+ thd->variables.character_set_client=
+ global_system_variables.character_set_client;
+ thd->variables.collation_connection=
+ global_system_variables.collation_connection;
+ thd->variables.character_set_results=
+ global_system_variables.character_set_results;
+ }
+ else
+ {
+ thd->variables.character_set_results=
+ thd->variables.collation_connection=
+ thd->variables.character_set_client;
+ }
+}
+
+
/*
Perform handshake, authorize client and update thd ACL variables.
SYNOPSIS
@@ -958,33 +990,7 @@ static int check_connection(THD *thd)
thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16;
thd->max_client_packet_length= uint4korr(net->read_pos+4);
DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8]));
- /*
- Use server character set and collation if
- - opt_character_set_client_handshake is not set
- - client has not specified a character set
- - client character set is the same as the servers
- - client character set doesn't exists in server
- */
- if (!opt_character_set_client_handshake ||
- !(thd->variables.character_set_client=
- get_charset((uint) net->read_pos[8], MYF(0))) ||
- !my_strcasecmp(&my_charset_latin1,
- global_system_variables.character_set_client->name,
- thd->variables.character_set_client->name))
- {
- thd->variables.character_set_client=
- global_system_variables.character_set_client;
- thd->variables.collation_connection=
- global_system_variables.collation_connection;
- thd->variables.character_set_results=
- global_system_variables.character_set_results;
- }
- else
- {
- thd->variables.character_set_results=
- thd->variables.collation_connection=
- thd->variables.character_set_client;
- }
+ thd_init_client_charset(thd, (uint) net->read_pos[8]);
thd->update_charset();
end= (char*) net->read_pos+32;
}
@@ -1298,6 +1304,12 @@ pthread_handler_t handle_bootstrap(void *arg)
thd->security_ctx->priv_user=
thd->security_ctx->user= (char*) my_strdup("boot", MYF(MY_WME));
thd->security_ctx->priv_host[0]=0;
+ /*
+ Make the "client" handle multiple results. This is necessary
+ to enable stored procedures with SELECTs and Dynamic SQL
+ in init-file.
+ */
+ thd->client_capabilities|= CLIENT_MULTI_RESULTS;
buff= (char*) thd->net.buff;
thd->init_for_queries();
@@ -1365,7 +1377,8 @@ end:
DBUG_RETURN(0);
}
- /* This works because items are allocated with sql_alloc() */
+
+/* This works because items are allocated with sql_alloc() */
void free_items(Item *item)
{
@@ -1379,7 +1392,7 @@ void free_items(Item *item)
DBUG_VOID_RETURN;
}
- /* This works because items are allocated with sql_alloc() */
+/* This works because items are allocated with sql_alloc() */
void cleanup_items(Item *item)
{
@@ -1389,7 +1402,26 @@ void cleanup_items(Item *item)
DBUG_VOID_RETURN;
}
-int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd)
+/*
+ Handle COM_TABLE_DUMP command
+
+ SYNOPSIS
+ mysql_table_dump
+ thd thread handle
+ db database name or an empty string. If empty,
+ the current database of the connection is used
+ tbl_name name of the table to dump
+
+ NOTES
+ This function is written to handle one specific command only.
+
+ RETURN VALUE
+ 0 success
+ 1 error, the error message is set in THD
+*/
+
+static
+int mysql_table_dump(THD* thd, char* db, char* tbl_name)
{
TABLE* table;
TABLE_LIST* table_list;
@@ -1426,7 +1458,7 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd)
goto err;
}
net_flush(&thd->net);
- if ((error= table->file->dump(thd,fd)))
+ if ((error= table->file->dump(thd,-1)))
my_error(ER_GET_ERRNO, MYF(0), error);
err:
@@ -1678,7 +1710,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
tbl_name= strmake(db, packet + 1, db_len)+1;
strmake(tbl_name, packet + db_len + 2, tbl_len);
- mysql_table_dump(thd, db, tbl_name, -1);
+ mysql_table_dump(thd, db, tbl_name);
break;
}
case COM_CHANGE_USER:
@@ -1744,9 +1776,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
else
{
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* we've authenticated new user */
if (save_user_connect)
decrease_user_connections(save_user_connect);
+#endif /* NO_EMBEDDED_ACCESS_CHECKS */
x_free((gptr) save_db);
x_free((gptr) save_security_ctx.user);
}
@@ -1853,11 +1887,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS],
&LOCK_status);
bzero((char*) &table_list,sizeof(table_list));
- if (!(table_list.db=thd->db))
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+ if (thd->copy_db_to(&table_list.db, 0))
break;
- }
pend= strend(packet);
thd->convert_string(&conv_name, system_charset_info,
packet, (uint) (pend-packet), thd->charset());
@@ -2205,6 +2236,34 @@ void log_slow_statement(THD *thd)
}
+/*
+ Create a TABLE_LIST object for an INFORMATION_SCHEMA table.
+
+ SYNOPSIS
+ prepare_schema_table()
+ thd thread handle
+ lex current lex
+ table_ident table alias if it's used
+ schema_table_idx the type of the INFORMATION_SCHEMA table to be
+ created
+
+ DESCRIPTION
+ This function is used in the parser to convert a SHOW or DESCRIBE
+ table_name command to a SELECT from INFORMATION_SCHEMA.
+ It prepares a SELECT_LEX and a TABLE_LIST object to represent the
+ given command as a SELECT parse tree.
+
+ NOTES
+ Due to the way this function works with memory and LEX it cannot
+ be used outside the parser (parse tree transformations outside
+ the parser break PS and SP).
+
+ RETURN VALUE
+ 0 success
+ 1 out of memory or SHOW commands are not allowed
+ in this version of the server.
+*/
+
int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
enum enum_schema_tables schema_table_idx)
{
@@ -2233,13 +2292,13 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
DBUG_RETURN(1);
#else
{
- char *db= lex->select_lex.db ? lex->select_lex.db : thd->db;
- if (!db)
+ char *db;
+ if (lex->select_lex.db == NULL &&
+ thd->copy_db_to(&lex->select_lex.db, 0))
{
- my_message(ER_NO_DB_ERROR,
- ER(ER_NO_DB_ERROR), MYF(0)); /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
+ DBUG_RETURN(1);
}
+ db= lex->select_lex.db;
remove_escape(db); // Fix escaped '_'
if (check_db_name(db))
{
@@ -2256,11 +2315,6 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
db);
DBUG_RETURN(1);
}
- /*
- We need to do a copy to make this prepared statement safe if this
- was thd->db
- */
- lex->select_lex.db= thd->strdup(db);
break;
}
#endif
@@ -2390,17 +2444,37 @@ static void reset_one_shot_variables(THD *thd)
}
-/****************************************************************************
-** mysql_execute_command
-** Execute command saved in thd and current_lex->sql_command
-****************************************************************************/
+/*
+ Execute command saved in thd and lex->sql_command
+
+ SYNOPSIS
+ mysql_execute_command()
+ thd Thread handle
+
+ IMPLEMENTATION
+
+ Before every operation that can request a write lock for a table
+ wait if a global read lock exists. However, do not wait if this
+ thread has locked tables already. No new locks can be requested
+ until the other locks are released. The thread that requests the
+ global read lock waits for write-locked tables to become unlocked.
+
+ Note that wait_if_global_read_lock() sets a protection against a new
+ global read lock when it succeeds. This needs to be released by
+ start_waiting_global_read_lock() after the operation.
+
+ RETURN
+ FALSE OK
+ TRUE Error
+*/
bool
mysql_execute_command(THD *thd)
{
- bool res= FALSE;
- int result= 0;
- LEX *lex= thd->lex;
+ bool res= FALSE;
+ bool need_start_waiting= FALSE; // have protection against global read lock
+ int result= 0;
+ LEX *lex= thd->lex;
/* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */
SELECT_LEX *select_lex= &lex->select_lex;
/* first table of first SELECT_LEX */
@@ -2503,11 +2577,6 @@ mysql_execute_command(THD *thd)
statistic_increment(thd->status_var.com_stat[lex->sql_command],
&LOCK_status);
-#ifdef HAVE_ROW_BASED_REPLICATION
- if (lex->binlog_row_based_if_mixed)
- thd->set_current_stmt_binlog_row_based_if_mixed();
-#endif /*HAVE_ROW_BASED_REPLICATION*/
-
switch (lex->sql_command) {
case SQLCOM_SHOW_EVENTS:
if ((res= check_access(thd, EVENT_ACL, thd->lex->select_lex.db, 0, 0, 0,
@@ -2677,8 +2746,7 @@ mysql_execute_command(THD *thd)
case SQLCOM_BACKUP_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL, all_tables, 0) ||
+ if (check_table_access(thd, SELECT_ACL, all_tables, 0) ||
check_global_access(thd, FILE_ACL))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
@@ -2690,8 +2758,7 @@ mysql_execute_command(THD *thd)
case SQLCOM_RESTORE_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, INSERT_ACL, all_tables, 0) ||
+ if (check_table_access(thd, INSERT_ACL, all_tables, 0) ||
check_global_access(thd, FILE_ACL))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
@@ -2703,8 +2770,7 @@ mysql_execute_command(THD *thd)
case SQLCOM_ASSIGN_TO_KEYCACHE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_access(thd, INDEX_ACL, first_table->db,
+ if (check_access(thd, INDEX_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)))
goto error;
@@ -2714,8 +2780,7 @@ mysql_execute_command(THD *thd)
case SQLCOM_PRELOAD_KEYS:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_access(thd, INDEX_ACL, first_table->db,
+ if (check_access(thd, INDEX_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)))
goto error;
@@ -2777,8 +2842,8 @@ mysql_execute_command(THD *thd)
case SQLCOM_LOAD_MASTER_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (!first_table->db)
- first_table->db= thd->db;
+ DBUG_ASSERT(first_table->db); /* Must be set in the parser */
+
if (check_access(thd, CREATE_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)))
@@ -2865,7 +2930,8 @@ mysql_execute_command(THD *thd)
TABLE in the same way. That way we avoid that a new table is
created during a gobal read lock.
*/
- if (wait_if_global_read_lock(thd, 0, 1))
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
{
res= 1;
goto end_with_restore_list;
@@ -2901,7 +2967,7 @@ mysql_execute_command(THD *thd)
{
update_non_unique_table_error(create_table, "CREATE", duplicate);
res= 1;
- goto end_with_restart_wait;
+ goto end_with_restore_list;
}
}
/* If we create merge table, we have to test tables in merge, too */
@@ -2917,7 +2983,7 @@ mysql_execute_command(THD *thd)
{
update_non_unique_table_error(tab, "CREATE", duplicate);
res= 1;
- goto end_with_restart_wait;
+ goto end_with_restore_list;
}
}
}
@@ -2962,13 +3028,6 @@ mysql_execute_command(THD *thd)
send_ok(thd);
}
-end_with_restart_wait:
- /*
- Release the protection against the global read lock and wake
- everyone, who might want to set a global read lock.
- */
- start_waiting_global_read_lock(thd);
-
/* put tables back for PS rexecuting */
end_with_restore_list:
lex->link_first_table_back(create_table, link_to_local);
@@ -3039,25 +3098,8 @@ end_with_restore_list:
my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name);
goto error;
}
- if (!select_lex->db)
- {
- /*
- In the case of ALTER TABLE ... RENAME we should supply the
- default database if the new name is not explicitly qualified
- by a database. (Bug #11493)
- */
- if (lex->alter_info.flags & ALTER_RENAME)
- {
- if (! thd->db)
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- goto error;
- }
- select_lex->db= thd->db;
- }
- else
- select_lex->db= first_table->db;
- }
+ /* Must be set in the parser */
+ DBUG_ASSERT(select_lex->db);
if (check_access(thd, priv_needed, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)) ||
@@ -3084,11 +3126,24 @@ end_with_restore_list:
}
}
/* Don't yet allow changing of symlinks with ALTER TABLE */
+ if (lex->create_info.data_file_name)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "DATA DIRECTORY option ignored");
+ if (lex->create_info.index_file_name)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "INDEX DIRECTORY option ignored");
lex->create_info.data_file_name=lex->create_info.index_file_name=0;
/* ALTER TABLE ends previous transaction */
if (end_active_trans(thd))
goto error;
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
thd->enable_slow_log= opt_log_slow_admin_statements;
res= mysql_alter_table(thd, select_lex->db, lex->name,
&lex->create_info,
@@ -3096,8 +3151,7 @@ end_with_restore_list:
lex->key_list,
select_lex->order_list.elements,
(ORDER *) select_lex->order_list.first,
- lex->duplicates, lex->ignore, &lex->alter_info,
- 1);
+ lex->ignore, &lex->alter_info, 1);
break;
}
#endif /*DONT_ALLOW_SHOW_COMMANDS*/
@@ -3105,8 +3159,6 @@ end_with_restore_list:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
TABLE_LIST *table;
- if (check_db_used(thd, all_tables))
- goto error;
for (table= first_table; table; table= table->next_local->next_local)
{
if (check_access(thd, ALTER_ACL | DROP_ACL, table->db,
@@ -3163,8 +3215,7 @@ end_with_restore_list:
if (lex->only_view)
first_table->skip_temporary= 1;
- if (check_db_used(thd, all_tables) ||
- check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db,
+ if (check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)))
goto error;
@@ -3177,8 +3228,7 @@ end_with_restore_list:
case SQLCOM_CHECKSUM:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0))
goto error; /* purecov: inspected */
res = mysql_checksum_table(thd, first_table, &lex->check_opt);
break;
@@ -3186,8 +3236,7 @@ end_with_restore_list:
case SQLCOM_REPAIR:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res= mysql_repair_table(thd, first_table, &lex->check_opt);
@@ -3208,8 +3257,7 @@ end_with_restore_list:
case SQLCOM_CHECK:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res = mysql_check_table(thd, first_table, &lex->check_opt);
@@ -3220,8 +3268,7 @@ end_with_restore_list:
case SQLCOM_ANALYZE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res= mysql_analyze_table(thd, first_table, &lex->check_opt);
@@ -3243,8 +3290,7 @@ end_with_restore_list:
case SQLCOM_OPTIMIZE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ?
@@ -3345,11 +3391,20 @@ end_with_restore_list:
break;
/* Skip first table, which is the table we are inserting in */
select_lex->context.table_list= first_table->next_local;
+
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values,
lex->update_list, lex->value_list,
lex->duplicates, lex->ignore);
+ /* do not show last insert ID if VIEW does not have auto_inc */
if (first_table->view && !first_table->contain_auto_increment)
- thd->last_insert_id= 0; // do not show last insert ID if VIEW have not it
+ thd->first_successful_insert_id_in_cur_stmt= 0;
break;
}
case SQLCOM_REPLACE_SELECT:
@@ -3368,6 +3423,14 @@ end_with_restore_list:
select_lex->options|= SELECT_NO_UNLOCK;
unit->set_limit(select_lex);
+
+ if (! thd->locked_tables &&
+ ! (need_start_waiting= ! wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
if (!(res= open_and_lock_tables(thd, all_tables)))
{
/* Skip first table, which is the table we are inserting in */
@@ -3401,9 +3464,9 @@ end_with_restore_list:
/* revert changes for SP */
select_lex->table_list.first= (byte*) first_table;
}
-
+ /* do not show last insert ID if VIEW does not have auto_inc */
if (first_table->view && !first_table->contain_auto_increment)
- thd->last_insert_id= 0; // do not show last insert ID if VIEW have not it
+ thd->first_successful_insert_id_in_cur_stmt= 0;
break;
}
case SQLCOM_TRUNCATE:
@@ -3435,6 +3498,14 @@ end_with_restore_list:
break;
DBUG_ASSERT(select_lex->offset_limit == 0);
unit->set_limit(select_lex);
+
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
res = mysql_delete(thd, all_tables, select_lex->where,
&select_lex->order_list,
unit->select_limit_cnt, select_lex->options,
@@ -3445,9 +3516,16 @@ end_with_restore_list:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
TABLE_LIST *aux_tables=
- (TABLE_LIST *)thd->lex->auxilliary_table_list.first;
+ (TABLE_LIST *)thd->lex->auxiliary_table_list.first;
multi_delete *result;
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
if ((res= multi_delete_precheck(thd, all_tables)))
break;
@@ -3633,7 +3711,7 @@ end_with_restore_list:
break;
case SQLCOM_LOCK_TABLES:
unlock_locked_tables(thd);
- if (check_db_used(thd, all_tables) || end_active_trans(thd))
+ if (end_active_trans(thd))
goto error;
if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables, 0))
goto error;
@@ -3772,12 +3850,8 @@ end_with_restore_list:
}
case SQLCOM_ALTER_DB:
{
- char *db= lex->name ? lex->name : thd->db;
- if (!db)
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- break;
- }
+ char *db= lex->name;
+ DBUG_ASSERT(db); /* Must be set in the parser */
if (!strip_sp(db) || check_db_name(db))
{
my_error(ER_WRONG_DB_NAME, MYF(0), db);
@@ -4010,11 +4084,13 @@ end_with_restore_list:
if (thd->security_ctx->user) // If not replication
{
- LEX_USER *user;
+ LEX_USER *user, *tmp_user;
List_iterator <LEX_USER> user_list(lex->users_list);
- while ((user= user_list++))
+ while ((tmp_user= user_list++))
{
+ if (!(user= get_current_user(thd, tmp_user)))
+ goto error;
if (specialflag & SPECIAL_NO_RESOLVE &&
hostname_requires_resolving(user->host.str))
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
@@ -4096,9 +4172,13 @@ end_with_restore_list:
if (lex->sql_command == SQLCOM_GRANT)
{
List_iterator <LEX_USER> str_list(lex->users_list);
- LEX_USER *user;
- while ((user=str_list++))
+ LEX_USER *user, *tmp_user;
+ while ((tmp_user=str_list++))
+ {
+ if (!(user= get_current_user(thd, tmp_user)))
+ goto error;
reset_mqh(user);
+ }
}
}
}
@@ -4114,7 +4194,7 @@ end_with_restore_list:
case SQLCOM_FLUSH:
{
bool write_to_binlog;
- if (check_global_access(thd,RELOAD_ACL) || check_db_used(thd, all_tables))
+ if (check_global_access(thd,RELOAD_ACL))
goto error;
/*
reload_acl_and_cache() will tell us if we are allowed to write to the
@@ -4153,25 +4233,27 @@ end_with_restore_list:
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
case SQLCOM_SHOW_GRANTS:
+ {
+ LEX_USER *grant_user= get_current_user(thd, lex->grant_user);
+ if (!grant_user)
+ goto error;
if ((thd->security_ctx->priv_user &&
- !strcmp(thd->security_ctx->priv_user, lex->grant_user->user.str)) ||
+ !strcmp(thd->security_ctx->priv_user, grant_user->user.str)) ||
!check_access(thd, SELECT_ACL, "mysql",0,1,0,0))
{
- res = mysql_show_grants(thd,lex->grant_user);
+ res = mysql_show_grants(thd, grant_user);
}
break;
+ }
#endif
case SQLCOM_HA_OPEN:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables) ||
- check_table_access(thd, SELECT_ACL, all_tables, 0))
+ if (check_table_access(thd, SELECT_ACL, all_tables, 0))
goto error;
res= mysql_ha_open(thd, first_table, 0);
break;
case SQLCOM_HA_CLOSE:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_db_used(thd, all_tables))
- goto error;
res= mysql_ha_close(thd, first_table);
break;
case SQLCOM_HA_READ:
@@ -4181,8 +4263,6 @@ end_with_restore_list:
if a user has no permissions to read a table, he won't be
able to open it (with SQLCOM_HA_OPEN) in the first place.
*/
- if (check_db_used(thd, all_tables))
- goto error;
unit->set_limit(select_lex);
res= mysql_ha_read(thd, first_table, lex->ha_read_mode, lex->ident.str,
lex->insert_list, lex->ha_rkey_mode, select_lex->where,
@@ -4312,23 +4392,11 @@ end_with_restore_list:
case SQLCOM_CREATE_SPFUNCTION:
{
uint namelen;
- char *name, *db;
+ char *name;
int result;
DBUG_ASSERT(lex->sphead != 0);
-
- if (!lex->sphead->m_db.str || !lex->sphead->m_db.str[0])
- {
- if (!thd->db)
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- delete lex->sphead;
- lex->sphead= 0;
- goto error;
- }
- lex->sphead->m_db.length= strlen(thd->db);
- lex->sphead->m_db.str= thd->db;
- }
+ DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */
if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str, 0, 0, 0,
is_schema_db(lex->sphead->m_db.str)))
@@ -4445,34 +4513,17 @@ end_with_restore_list:
}
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
- /*
- We need to copy name and db in order to use them for
- check_routine_access which is called after lex->sphead has
- been deleted.
- */
- name= thd->strdup(name);
- lex->sphead->m_db.str= db= thd->strmake(lex->sphead->m_db.str,
- lex->sphead->m_db.length);
res= (result= lex->sphead->create(thd));
if (result == SP_OK)
{
- /*
- We must cleanup the unit and the lex here because
- sp_grant_privileges calls (indirectly) db_find_routine,
- which in turn may call MYSQLparse with THD::lex.
- TODO: fix db_find_routine to use a temporary lex.
- */
- lex->unit.cleanup();
- delete lex->sphead;
- lex->sphead= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* only add privileges if really neccessary */
if (sp_automatic_privileges && !opt_noacl &&
check_routine_access(thd, DEFAULT_CREATE_PROC_ACLS,
- db, name,
+ lex->sphead->m_db.str, name,
lex->sql_command == SQLCOM_CREATE_PROCEDURE, 1))
{
- if (sp_grant_privileges(thd, db, name,
+ if (sp_grant_privileges(thd, lex->sphead->m_db.str, name,
lex->sql_command == SQLCOM_CREATE_PROCEDURE))
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_PROC_AUTO_GRANT_FAIL,
@@ -4480,6 +4531,9 @@ end_with_restore_list:
close_thread_tables(thd);
}
#endif
+ lex->unit.cleanup();
+ delete lex->sphead;
+ lex->sphead= 0;
send_ok(thd);
}
else
@@ -4896,7 +4950,8 @@ end_with_restore_list:
view_store_options(thd, first_table, &buff);
buff.append(STRING_WITH_LEN("VIEW "));
/* Test if user supplied a db (ie: we did not use thd->db) */
- if (first_table->db != thd->db && first_table->db[0])
+ if (first_table->db && first_table->db[0] &&
+ (thd->db == NULL || strcmp(first_table->db, thd->db)))
{
append_identifier(thd, &buff, first_table->db,
first_table->db_length);
@@ -5166,9 +5221,6 @@ end:
*/
if (thd->one_shot_set && lex->sql_command != SQLCOM_SET_OPTION)
reset_one_shot_variables(thd);
-#ifdef HAVE_ROW_BASED_REPLICATION
- thd->reset_current_stmt_binlog_row_based();
-#endif /*HAVE_ROW_BASED_REPLICATION*/
/*
The return value for ROW_COUNT() is "implementation dependent" if the
@@ -5178,11 +5230,22 @@ end:
*/
if (!(sql_command_flags[lex->sql_command] & CF_HAS_ROW_COUNT))
thd->row_count_func= -1;
- DBUG_RETURN(res || thd->net.report_error);
+
+ goto finish;
error:
- res= 1; // would be better to set res=1 before "goto error"
- goto end;
+ res= TRUE;
+
+finish:
+ if (need_start_waiting)
+ {
+ /*
+ Release the protection against the global read lock and wake
+ everyone, who might want to set a global read lock.
+ */
+ start_waiting_global_read_lock(thd);
+ }
+ DBUG_RETURN(res || thd->net.report_error);
}
@@ -5549,7 +5612,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
(want_access & ~EXTRA_ACL) &&
thd->db)
tables->grant.privilege= want_access;
- else if (tables->db && tables->db == thd->db)
+ else if (tables->db && thd->db && strcmp(tables->db, thd->db) == 0)
{
if (found && !grant_option) // db already checked
tables->grant.privilege=found_access;
@@ -5695,24 +5758,6 @@ bool check_merge_table_access(THD *thd, char *db,
}
-static bool check_db_used(THD *thd,TABLE_LIST *tables)
-{
- for (; tables; tables= tables->next_global)
- {
- if (!tables->db)
- {
- if (!(tables->db=thd->db))
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR),
- MYF(0)); /* purecov: tested */
- return TRUE; /* purecov: tested */
- }
- }
- }
- return FALSE;
-}
-
-
/****************************************************************************
Check stack size; Send error if there isn't enough stack to continue
****************************************************************************/
@@ -5760,7 +5805,7 @@ bool check_stack_overrun(THD *thd, long margin,
bool my_yyoverflow(short **yyss, YYSTYPE **yyvs, ulong *yystacksize)
{
- LEX *lex=current_lex;
+ LEX *lex= current_thd->lex;
ulong old_info=0;
if ((uint) *yystacksize >= MY_YACC_MAX)
return 1;
@@ -5807,6 +5852,7 @@ mysql_init_query(THD *thd, uchar *buf, uint length)
DESCRIPTION
This needs to be called before execution of every statement
(prepared or conventional).
+ It is not called by substatements of routines.
TODO
Make it a method of THD and align its name with the rest of
@@ -5817,9 +5863,12 @@ mysql_init_query(THD *thd, uchar *buf, uint length)
void mysql_reset_thd_for_next_command(THD *thd)
{
DBUG_ENTER("mysql_reset_thd_for_next_command");
+ DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */
thd->free_list= 0;
thd->select_number= 1;
- thd->last_insert_id_used= thd->query_start_used= thd->insert_id_used=0;
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty();
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt=
+ thd->query_start_used= 0;
thd->is_fatal_error= thd->time_zone_used= 0;
thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS |
SERVER_QUERY_NO_INDEX_USED |
@@ -5846,6 +5895,12 @@ void mysql_reset_thd_for_next_command(THD *thd)
thd->rand_used= 0;
thd->sent_row_count= thd->examined_row_count= 0;
}
+ /*
+ Because we come here only at the start of top-level statements, the
+ binlog format is constant inside a complex statement (e.g. one using
+ stored functions).
+ */
+ thd->reset_current_stmt_binlog_row_based();
+
DBUG_VOID_RETURN;
}
@@ -5983,7 +6038,7 @@ void mysql_init_multi_delete(LEX *lex)
mysql_init_select(lex);
lex->select_lex.select_limit= 0;
lex->unit.select_limit_cnt= HA_POS_ERROR;
- lex->select_lex.table_list.save_and_clear(&lex->auxilliary_table_list);
+ lex->select_lex.table_list.save_and_clear(&lex->auxiliary_table_list);
lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ;
lex->query_tables= 0;
lex->query_tables_last= &lex->query_tables;
@@ -6215,7 +6270,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
void store_position_for_column(const char *name)
{
- current_lex->last_field->after=my_const_cast(char*) (name);
+ current_thd->lex->last_field->after=my_const_cast(char*) (name);
}
bool
@@ -6354,19 +6409,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->db= table->db.str;
ptr->db_length= table->db.length;
}
- else if (thd->db)
- {
- ptr->db= thd->db;
- ptr->db_length= thd->db_length;
- }
- else
- {
- /* The following can't be "" as we may do 'casedn_str()' on it */
- ptr->db= empty_c_string;
- ptr->db_length= 0;
- }
- if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute())
- ptr->db= thd->strdup(ptr->db);
+ else if (thd->copy_db_to(&ptr->db, &ptr->db_length))
+ DBUG_RETURN(0);
ptr->alias= alias_str;
if (lower_case_table_names && table->table.length)
@@ -7261,7 +7305,7 @@ Item * all_any_subquery_creator(Item *left_expr,
return new Item_func_not(new Item_in_subselect(left_expr, select_lex));
Item_allany_subselect *it=
- new Item_allany_subselect(left_expr, (*cmp)(all), select_lex, all);
+ new Item_allany_subselect(left_expr, cmp, select_lex, all);
if (all)
return it->upper_item= new Item_func_not_all(it); /* ALL */
@@ -7294,7 +7338,7 @@ bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
- DUP_ERROR, 0, &alter_info, 1));
+ 0, &alter_info, 1));
}
@@ -7312,7 +7356,7 @@ bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
- DUP_ERROR, 0, alter_info, 1));
+ 0, alter_info, 1));
}
@@ -7413,14 +7457,13 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables)
{
SELECT_LEX *select_lex= &thd->lex->select_lex;
TABLE_LIST *aux_tables=
- (TABLE_LIST *)thd->lex->auxilliary_table_list.first;
+ (TABLE_LIST *)thd->lex->auxiliary_table_list.first;
TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;
DBUG_ENTER("multi_delete_precheck");
/* sql_yacc guarantees that tables and aux_tables are not zero */
DBUG_ASSERT(aux_tables != 0);
- if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) ||
- check_table_access(thd, SELECT_ACL, tables, 0))
+ if (check_table_access(thd, SELECT_ACL, tables, 0))
DBUG_RETURN(TRUE);
/*
@@ -7467,7 +7510,7 @@ bool multi_delete_set_locks_and_link_aux_tables(LEX *lex)
lex->table_count= 0;
- for (target_tbl= (TABLE_LIST *)lex->auxilliary_table_list.first;
+ for (target_tbl= (TABLE_LIST *)lex->auxiliary_table_list.first;
target_tbl; target_tbl= target_tbl->next_local)
{
lex->table_count++;
@@ -7520,8 +7563,7 @@ bool update_precheck(THD *thd, TABLE_LIST *tables)
my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0));
DBUG_RETURN(TRUE);
}
- DBUG_RETURN(check_db_used(thd, tables) ||
- check_one_table_access(thd, UPDATE_ACL, tables));
+ DBUG_RETURN(check_one_table_access(thd, UPDATE_ACL, tables));
}
@@ -7771,3 +7813,34 @@ LEX_USER *create_definer(THD *thd, LEX_STRING *user_name, LEX_STRING *host_name)
return definer;
}
+
+
+/*
+ Returns information about a given user or the current user.
+
+ SYNOPSIS
+ get_current_user()
+ thd [in] thread handler
+ user [in] user
+
+ RETURN
+ On success, return a valid pointer to initialized
+ LEX_USER, which contains user information.
+ On error, return 0.
+*/
+
+LEX_USER *get_current_user(THD *thd, LEX_USER *user)
+{
+ LEX_USER *curr_user;
+ if (!user->user.str) // current_user
+ {
+ if (!(curr_user= (LEX_USER*) thd->alloc(sizeof(LEX_USER))))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(LEX_USER));
+ return 0;
+ }
+ get_default_definer(thd, curr_user);
+ return curr_user;
+ }
+ return user;
+}
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 8350d0b8ffd..73091c0994e 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -2415,6 +2415,10 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
bool unsigned_flag= part_info->part_expr->unsigned_flag;
DBUG_ENTER("get_list_array_idx_for_endpoint");
+ if (part_info->part_expr->null_value)
+ {
+ DBUG_RETURN(0);
+ }
if (unsigned_flag)
part_func_value-= 0x8000000000000000ULL;
DBUG_ASSERT(part_info->no_list_values);
@@ -2539,6 +2543,13 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
bool unsigned_flag= part_info->part_expr->unsigned_flag;
DBUG_ENTER("get_partition_id_range_for_endpoint");
+ if (part_info->part_expr->null_value)
+ {
+ uint32 ret_part_id= 0;
+ if (!left_endpoint && include_endpoint)
+ ret_part_id= 1;
+ DBUG_RETURN(ret_part_id);
+ }
if (unsigned_flag)
part_func_value-= 0x8000000000000000ULL;
while (max_part_id > min_part_id)
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index ca56c39204b..ca31845ff95 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -2141,7 +2141,8 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
Reset old pointers to TABLEs: they are not valid since the tables
were closed in the end of previous prepare or execute call.
*/
- tables->table= 0;
+ tables->reinit_before_use(thd);
+
/* Reset is_schema_table_processed value(needed for I_S tables */
tables->is_schema_table_processed= FALSE;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 8b32ae47e3d..36f87a7fe6d 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -689,6 +689,25 @@ JOIN::optimize()
DBUG_PRINT("info",("Select tables optimized away"));
zero_result_cause= "Select tables optimized away";
tables_list= 0; // All tables resolved
+ /*
+ Extract all table-independent conditions and replace the WHERE
+ clause with them. All other conditions were computed by opt_sum_query
+ and the MIN/MAX/COUNT function(s) have been replaced by constants,
+ so there is no need to compute the whole WHERE clause again.
+ Notice that make_cond_for_table() will always succeed in removing all
+ computed conditions, because opt_sum_query() is applicable only to
+ conjunctions.
+ Preserve conditions for EXPLAIN.
+ */
+ if (conds && !(thd->lex->describe & DESCRIBE_EXTENDED))
+ {
+ COND *table_independent_conds=
+ make_cond_for_table(conds, PSEUDO_TABLE_BITS, 0);
+ DBUG_EXECUTE("where",
+ print_where(table_independent_conds,
+ "where after opt_sum_query()"););
+ conds= table_independent_conds;
+ }
}
}
if (!tables_list)
@@ -1067,6 +1086,23 @@ JOIN::optimize()
{
need_tmp=1; simple_order=simple_group=0; // Force tmp table without sort
}
+ if (order)
+ {
+ /*
+ Force the use of a tmp table if sorting by an SP or UDF function, due to
+ their expensive and probably non-deterministic nature.
+ */
+ for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next)
+ {
+ Item *item= *tmp_order->item;
+ if (item->walk(&Item::is_expensive_processor, 0, (byte*)0))
+ {
+ /* Force tmp table without sort */
+ need_tmp=1; simple_order=simple_group=0;
+ break;
+ }
+ }
+ }
}
tmp_having= having;
@@ -1212,6 +1248,11 @@ int
JOIN::reinit()
{
DBUG_ENTER("JOIN::reinit");
+
+ unit->offset_limit_cnt= (ha_rows)(select_lex->offset_limit ?
+ select_lex->offset_limit->val_uint() :
+ ULL(0));
+
first_record= 0;
if (exec_tmp_table1)
@@ -2493,8 +2534,11 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end,
/* field = expression OR field IS NULL */
old->level= and_level;
old->optimize= KEY_OPTIMIZE_REF_OR_NULL;
- /* Remember the NOT NULL value */
- if (old->val->is_null())
+ /*
+ Remember the NOT NULL value, but only if the current value is a
+ constant that does not depend on other tables.
+ */
+ if (!old->val->used_tables() && old->val->is_null())
old->val= new_fields->val;
/* The referred expression can be NULL: */
old->null_rejecting= 0;
@@ -5537,6 +5581,7 @@ make_join_readinfo(JOIN *join, uint options)
{
uint i;
bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
+ bool ordered_set= 0;
bool sorted= 1;
DBUG_ENTER("make_join_readinfo");
@@ -5547,6 +5592,22 @@ make_join_readinfo(JOIN *join, uint options)
tab->read_record.table= table;
tab->read_record.file=table->file;
tab->next_select=sub_select; /* normal select */
+
+ /*
+ Determine if the result set is already ordered for ORDER BY, so that
+ the join cache can be disabled, because it would change the ordering
+ of the results. The code handles a sort table at any location (not
+ only first after the const tables), even though other locations are
+ currently prohibited.
+ */
+ if (!ordered_set &&
+ (table == join->sort_by_table &&
+ (!join->order || join->skip_sort_order ||
+ test_if_skip_sort_order(tab, join->order, join->select_limit,
+ 1))
+ ) ||
+ (join->sort_by_table == (TABLE *) 1 && i != join->const_tables))
+ ordered_set= 1;
+
tab->sorted= sorted;
sorted= 0; // only first must be sorted
switch (tab->type) {
@@ -5619,10 +5680,11 @@ make_join_readinfo(JOIN *join, uint options)
case JT_ALL:
/*
If previous table use cache
+ If the incoming data set is already sorted, don't use the cache.
*/
table->status=STATUS_NO_RECORD;
if (i != join->const_tables && !(options & SELECT_NO_JOIN_CACHE) &&
- tab->use_quick != 2 && !tab->first_inner)
+ tab->use_quick != 2 && !tab->first_inner && !ordered_set)
{
if ((options & SELECT_DESCRIBE) ||
!join_init_cache(join->thd,join->join_tab+join->const_tables,
@@ -6209,10 +6271,16 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables,
DBUG_RETURN(0);
}
-
+/*
+ Used only in JOIN::clear().
+*/
static void clear_tables(JOIN *join)
{
- for (uint i=0 ; i < join->tables ; i++)
+ /*
+ must clear only the non-const tables, as const tables
+ are not re-calculated.
+ */
+ for (uint i=join->const_tables ; i < join->tables ; i++)
mark_as_null_row(join->table[i]); // All fields are NULL
}
@@ -7912,7 +7980,8 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
Field *field=((Item_field*) args[0])->field;
if (field->flags & AUTO_INCREMENT_FLAG && !field->table->maybe_null &&
(thd->options & OPTION_AUTO_IS_NULL) &&
- thd->insert_id())
+ (thd->first_successful_insert_id_in_prev_stmt > 0 &&
+ thd->substitute_null_with_insert_id))
{
#ifdef HAVE_QUERY_CACHE
query_cache_abort(&thd->net);
@@ -7920,7 +7989,7 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
COND *new_cond;
if ((new_cond= new Item_func_eq(args[0],
new Item_int("last_insert_id()",
- thd->insert_id(),
+ thd->read_first_successful_insert_id_in_prev_stmt(),
21))))
{
cond=new_cond;
@@ -7931,7 +8000,11 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
*/
cond->fix_fields(thd, &cond);
}
- thd->insert_id(0); // Clear for next request
+ /*
+ IS NULL should be mapped to LAST_INSERT_ID only for the first row, so
+ clear the flag for the next row.
+ */
+ thd->substitute_null_with_insert_id= FALSE;
}
/* fix to replace 'NULL' dates with '0' (shreeve@uci.edu) */
else if (((field->type() == FIELD_TYPE_DATE) ||
@@ -8069,13 +8142,19 @@ Field *create_tmp_field_from_field(THD *thd, Field *org_field,
{
Field *new_field;
- if (convert_blob_length && (org_field->flags & BLOB_FLAG))
+ /*
+ Make sure that the blob fits into a Field_varstring, which has a
+ 2-byte length.
+ */
+ if (convert_blob_length && convert_blob_length < UINT_MAX16 &&
+ (org_field->flags & BLOB_FLAG))
new_field= new Field_varstring(convert_blob_length,
org_field->maybe_null(),
org_field->field_name, table->s,
org_field->charset());
else
- new_field= org_field->new_field(thd->mem_root, table);
+ new_field= org_field->new_field(thd->mem_root, table,
+ table == org_field->table);
if (new_field)
{
new_field->init(table);
@@ -8133,8 +8212,13 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
item->name, item->decimals);
break;
case INT_RESULT:
- new_field= new Field_longlong(item->max_length, maybe_null,
- item->name, item->unsigned_flag);
+ /* Select an integer type with the minimal precision that fits */
+ if (item->max_length > 11)
+ new_field=new Field_longlong(item->max_length, maybe_null,
+ item->name, item->unsigned_flag);
+ else
+ new_field=new Field_long(item->max_length, maybe_null,
+ item->name, item->unsigned_flag);
break;
case STRING_RESULT:
DBUG_ASSERT(item->collation.collation);
@@ -8147,8 +8231,13 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
if ((type= item->field_type()) == MYSQL_TYPE_DATETIME ||
type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE)
new_field= item->tmp_table_field_from_field_type(table, 1);
+ /*
+ Make sure that the blob fits into a Field_varstring, which has a
+ 2-byte length.
+ */
else if (item->max_length/item->collation.collation->mbmaxlen > 255 &&
- convert_blob_length)
+ item->max_length/item->collation.collation->mbmaxlen < UINT_MAX16
+ && convert_blob_length)
new_field= new Field_varstring(convert_blob_length, maybe_null,
item->name, table->s,
item->collation.collation);
@@ -9410,9 +9499,9 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
/* copy row that filled HEAP table */
if ((write_err=new_table.file->write_row(table->record[0])))
{
- if (write_err != HA_ERR_FOUND_DUPP_KEY &&
- write_err != HA_ERR_FOUND_DUPP_UNIQUE || !ignore_last_dupp_key_error)
- goto err;
+ if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
+ !ignore_last_dupp_key_error)
+ goto err;
}
/* remove heap table and change to use myisam table */
@@ -10722,8 +10811,13 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if (!join->first_record)
{
+ List_iterator_fast<Item> it(*join->fields);
+ Item *item;
/* No matching rows for group function */
join->clear();
+
+ while ((item= it++))
+ item->no_rows_in_result();
}
if (join->having && join->having->val_int() == 0)
error= -1; // Didn't satisfy having
@@ -10833,8 +10927,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
join->found_records++;
if ((error=table->file->write_row(table->record[0])))
{
- if (error == HA_ERR_FOUND_DUPP_KEY ||
- error == HA_ERR_FOUND_DUPP_UNIQUE)
+ if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
goto end;
if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
error,1))
@@ -12894,7 +12987,7 @@ count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields,
{
if (! field->const_item())
{
- Item_sum *sum_item=(Item_sum*) field;
+ Item_sum *sum_item=(Item_sum*) field->real_item();
if (!sum_item->quick_group)
param->quick_group=0; // UDF SUM function
param->sum_func_count++;
@@ -13154,10 +13247,11 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
param->copy_funcs.empty();
for (i= 0; (pos= li++); i++)
{
- if (pos->real_item()->type() == Item::FIELD_ITEM)
+ Item *real_pos= pos->real_item();
+ if (real_pos->type() == Item::FIELD_ITEM)
{
Item_field *item;
- pos= pos->real_item();
+ pos= real_pos;
if (!(item= new Item_field(thd, ((Item_field*) pos))))
goto err;
pos= item;
@@ -13184,7 +13278,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
saved value
*/
Field *field= item->field;
- item->result_field=field->new_field(thd->mem_root,field->table);
+ item->result_field=field->new_field(thd->mem_root,field->table, 1);
char *tmp=(char*) sql_alloc(field->pack_length()+1);
if (!tmp)
goto err;
@@ -13196,12 +13290,13 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
}
}
}
- else if ((pos->type() == Item::FUNC_ITEM ||
- pos->type() == Item::SUBSELECT_ITEM ||
- pos->type() == Item::CACHE_ITEM ||
- pos->type() == Item::COND_ITEM) &&
- !pos->with_sum_func)
+ else if ((real_pos->type() == Item::FUNC_ITEM ||
+ real_pos->type() == Item::SUBSELECT_ITEM ||
+ real_pos->type() == Item::CACHE_ITEM ||
+ real_pos->type() == Item::COND_ITEM) &&
+ !real_pos->with_sum_func)
{ // Save for send fields
+ pos= real_pos;
/* TODO:
In most cases this result will be sent to the user.
This should be changed to use copy_int or copy_real depending
@@ -14628,10 +14723,19 @@ void st_select_lex::print(THD *thd, String *str)
str->append(STRING_WITH_LEN("sql_buffer_result "));
if (options & OPTION_FOUND_ROWS)
str->append(STRING_WITH_LEN("sql_calc_found_rows "));
- if (!thd->lex->safe_to_cache_query)
- str->append(STRING_WITH_LEN("sql_no_cache "));
- if (options & OPTION_TO_QUERY_CACHE)
- str->append(STRING_WITH_LEN("sql_cache "));
+ switch (sql_cache)
+ {
+ case SQL_NO_CACHE:
+ str->append(STRING_WITH_LEN("sql_no_cache "));
+ break;
+ case SQL_CACHE:
+ str->append(STRING_WITH_LEN("sql_cache "));
+ break;
+ case SQL_CACHE_UNSPECIFIED:
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
//Item List
bool first= 1;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 8f8c84c2db5..bf09f516499 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -71,6 +71,10 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **),
static void store_key_options(THD *thd, String *packet, TABLE *table,
KEY *key_info);
+static void
+append_algorithm(TABLE_LIST *table, String *buff);
+
+
/***************************************************************************
** List all table types supported
***************************************************************************/
@@ -766,7 +770,14 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
{
if (!wild || !wild[0] ||
!wild_case_compare(system_charset_info, field->field_name,wild))
- field_list.push_back(new Item_field(field));
+ {
+ if (table_list->view)
+ field_list.push_back(new Item_ident_for_show(field,
+ table_list->view_db.str,
+ table_list->view_name.str));
+ else
+ field_list.push_back(new Item_field(field));
+ }
}
restore_record(table, s->default_values); // Get empty record
table->use_all_columns();
@@ -1341,10 +1352,10 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
packet->append(buff, (uint) (end - buff));
}
table->file->append_create_info(packet);
- if (share->comment && share->comment[0])
+ if (share->comment.length)
{
packet->append(STRING_WITH_LEN(" COMMENT="));
- append_unescaped(packet, share->comment, strlen(share->comment));
+ append_unescaped(packet, share->comment.str, share->comment.length);
}
if (share->connect_string.length)
{
@@ -1423,6 +1434,28 @@ static void store_key_options(THD *thd, String *packet, TABLE *table,
void
view_store_options(THD *thd, TABLE_LIST *table, String *buff)
{
+ append_algorithm(table, buff);
+ append_definer(thd, buff, &table->definer.user, &table->definer.host);
+ if (table->view_suid)
+ buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER "));
+ else
+ buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER "));
+}
+
+
+/*
+ Append the ALGORITHM clause of a view to the given buffer.
+
+ SYNOPSIS
+ append_algorithm()
+ table [in] view description (TABLE_LIST of the view)
+ buff [inout] buffer to hold the ALGORITHM clause
+*/
+
+static void append_algorithm(TABLE_LIST *table, String *buff)
+{
buff->append(STRING_WITH_LEN("ALGORITHM="));
switch ((int8)table->algorithm) {
case VIEW_ALGORITHM_UNDEFINED:
@@ -1437,11 +1470,6 @@ view_store_options(THD *thd, TABLE_LIST *table, String *buff)
default:
DBUG_ASSERT(0); // never should happen
}
- append_definer(thd, buff, &table->definer.user, &table->definer.host);
- if (table->view_suid)
- buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER "));
- else
- buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER "));
}
/*
@@ -2880,11 +2908,14 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables,
(uint) (ptr-option_buff)-1), cs);
{
char *comment;
- comment= show_table->file->update_table_comment(share->comment);
+ comment= show_table->file->update_table_comment(share->comment.str);
if (comment)
{
- table->field[20]->store(comment, strlen(comment), cs);
- if (comment != share->comment)
+ table->field[20]->store(comment,
+ (comment == share->comment.str ?
+ share->comment.length :
+ strlen(comment)), cs);
+ if (comment != share->comment.str)
my_free(comment, MYF(0));
}
}
@@ -3309,6 +3340,7 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
{
get_field(thd->mem_root, proc_table->field[10], &tmp_string);
table->field[7]->store(tmp_string.ptr(), tmp_string.length(), cs);
+ table->field[7]->set_notnull();
}
table->field[6]->store(STRING_WITH_LEN("SQL"), cs);
table->field[10]->store(STRING_WITH_LEN("SQL"), cs);
@@ -3517,7 +3549,16 @@ static int get_schema_views_record(THD *thd, struct st_table_list *tables,
table->field[1]->store(tables->view_db.str, tables->view_db.length, cs);
table->field[2]->store(tables->view_name.str, tables->view_name.length, cs);
if (grant & SHOW_VIEW_ACL)
- table->field[3]->store(tables->query.str, tables->query.length, cs);
+ {
+ char buff[2048];
+ String qwe_str(buff, sizeof(buff), cs);
+ qwe_str.length(0);
+ qwe_str.append(STRING_WITH_LEN("/* "));
+ append_algorithm(tables, &qwe_str);
+ qwe_str.append(STRING_WITH_LEN("*/ "));
+ qwe_str.append(tables->query.str, tables->query.length);
+ table->field[3]->store(qwe_str.ptr(), qwe_str.length(), cs);
+ }
if (tables->with_check != VIEW_CHECK_NONE)
{
@@ -5066,6 +5107,7 @@ bool get_schema_tables_result(JOIN *join)
table_list->table->file->delete_all_rows();
free_io_cache(table_list->table);
filesort_free_buffers(table_list->table);
+ table_list->table->null_row= 0;
}
else
table_list->table->file->stats.records= 0;
@@ -5253,7 +5295,7 @@ ST_FIELD_INFO proc_fields_info[]=
{"ROUTINE_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type"},
{"DTD_IDENTIFIER", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
{"ROUTINE_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0},
- {"ROUTINE_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"ROUTINE_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
{"EXTERNAL_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
{"EXTERNAL_LANGUAGE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
{"PARAMETER_STYLE", 8, MYSQL_TYPE_STRING, 0, 0, 0},
@@ -5451,7 +5493,7 @@ ST_FIELD_INFO partitions_fields_info[]=
{"CHECK_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
{"CHECKSUM", 21 , MYSQL_TYPE_LONG, 0, 1, 0},
{"PARTITION_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, 0},
- {"NODEGROUP", 21 , MYSQL_TYPE_LONG, 0, 0, 0},
+ {"NODEGROUP", 12 , MYSQL_TYPE_STRING, 0, 0, 0},
{"TABLESPACE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
};
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 7d8631e3236..945fac83ff2 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -35,9 +35,7 @@ const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end);
static int copy_data_between_tables(TABLE *from,TABLE *to,
- List<create_field> &create,
- enum enum_duplicates handle_duplicates,
- bool ignore,
+ List<create_field> &create, bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,ha_rows *deleted);
static bool prepare_blob_field(THD *thd, create_field *sql_field);
@@ -3278,8 +3276,23 @@ bool mysql_create_table_internal(THD *thd,
my_casedn_str(files_charset_info, path);
create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE;
}
- else
+ else
+ {
+ #ifdef FN_DEVCHAR
+ /* check if the table name contains FN_DEVCHAR when defined */
+ const char *start= alias;
+ while (*start != '\0')
+ {
+ if (*start == FN_DEVCHAR)
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), alias);
+ DBUG_RETURN(TRUE);
+ }
+ start++;
+ }
+#endif
path_length= build_table_filename(path, sizeof(path), db, alias, reg_ext);
+ }
/* Check if table already exists */
if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
@@ -3297,8 +3310,7 @@ bool mysql_create_table_internal(THD *thd,
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
goto err;
}
- if (wait_if_global_read_lock(thd, 0, 1))
- goto err;
+
VOID(pthread_mutex_lock(&LOCK_open));
if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
{
@@ -3309,7 +3321,19 @@ bool mysql_create_table_internal(THD *thd,
my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
goto unlock_and_end;
}
- DBUG_ASSERT(get_cached_table_share(db, alias) == 0);
+ /*
+ We don't assert here, but check the result, because the table could be
+ in the table definition cache while at the same time the .frm file is
+ missing from disk, in case of manual intervention that deletes the
+ .frm file. The user has to use FLUSH TABLES; to clear the cache.
+ Then the table can be created again. This case is pretty obscure and
+ therefore we don't introduce a new error message only for it.
+ */
+ if (get_cached_table_share(db, alias))
+ {
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
+ goto unlock_and_end;
+ }
}
/*
@@ -3374,7 +3398,6 @@ bool mysql_create_table_internal(THD *thd,
error= FALSE;
unlock_and_end:
VOID(pthread_mutex_unlock(&LOCK_open));
- start_waiting_global_read_lock(thd);
err:
thd->proc_info="After create";
@@ -3606,7 +3629,7 @@ void close_cached_table(THD *thd, TABLE *table)
thd->open_tables=unlink_open_table(thd,thd->open_tables,table);
/* When lock on LOCK_open is freed other threads can continue */
- pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
DBUG_VOID_RETURN;
}
@@ -4393,7 +4416,8 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
{
DBUG_RETURN(TRUE);
}
- src_db= table_ident->db.str ? table_ident->db.str : thd->db;
+ DBUG_ASSERT(table_ident->db.str); /* Must be set in the parser */
+ src_db= table_ident->db.str;
/*
Validate the source table
@@ -4941,8 +4965,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
HA_CREATE_INFO *lex_create_info,
TABLE_LIST *table_list,
List<create_field> &fields, List<Key> &keys,
- uint order_num, ORDER *order,
- enum enum_duplicates handle_duplicates, bool ignore,
+ uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok)
{
TABLE *table,*new_table=0;
@@ -4953,7 +4976,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
char path[FN_REFLEN];
char reg_path[FN_REFLEN+1];
ha_rows copied,deleted;
- ulonglong next_insert_id;
uint db_create_options, used_fields;
handlerton *old_db_type, *new_db_type;
HA_CREATE_INFO *create_info;
@@ -5098,9 +5120,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
ha_resolve_storage_engine_name(old_db_type),
ha_resolve_storage_engine_name(new_db_type)));
if (ha_check_storage_engine_flag(old_db_type, HTON_ALTER_NOT_SUPPORTED) ||
- ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED) ||
- (old_db_type != new_db_type &&
- ha_check_storage_engine_flag(new_db_type, HTON_ALTER_CANNOT_CREATE)))
+ ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED))
{
DBUG_PRINT("info", ("doesn't support alter"));
my_error(ER_ILLEGAL_HA, MYF(0), table_name);
@@ -5469,8 +5489,11 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
goto err;
}
create_info->db_type=new_db_type;
- if (!create_info->comment)
- create_info->comment= table->s->comment;
+ if (!create_info->comment.str)
+ {
+ create_info->comment.str= table->s->comment.str;
+ create_info->comment.length= table->s->comment.length;
+ }
table->file->update_create_info(create_info);
if ((create_info->table_options &
@@ -5773,18 +5796,15 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields
thd->cuted_fields=0L;
thd->proc_info="copy to tmp table";
- next_insert_id=thd->next_insert_id; // Remember for logging
copied=deleted=0;
if (new_table && !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER))
{
/* We don't want update TIMESTAMP fields during ALTER TABLE. */
new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
- error=copy_data_between_tables(table,new_table,create_list,
- handle_duplicates, ignore,
+ error=copy_data_between_tables(table, new_table, create_list, ignore,
order_num, order, &copied, &deleted);
}
- thd->last_insert_id=next_insert_id; // Needed for correct log
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
/* If we did not need to copy, we might still need to add/drop indexes. */
@@ -6118,7 +6138,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
}
VOID(pthread_mutex_unlock(&LOCK_open));
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
/*
The ALTER TABLE is always in its own transaction.
Commit must not be called while LOCK_open is locked. It could call
@@ -6195,7 +6215,6 @@ end_temporary:
static int
copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
- enum enum_duplicates handle_duplicates,
bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,
@@ -6214,6 +6233,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
ha_rows examined_rows;
bool auto_increment_field_copied= 0;
ulong save_sql_mode;
+ ulonglong prev_insert_id;
DBUG_ENTER("copy_data_between_tables");
/*
@@ -6294,8 +6314,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
/* Tell handler that we have values for all columns in the to table */
to->use_all_columns();
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
- if (ignore ||
- handle_duplicates == DUP_REPLACE)
+ if (ignore)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
thd->row_count= 0;
restore_record(to, s->default_values); // Create empty record
@@ -6320,14 +6339,13 @@ copy_data_between_tables(TABLE *from,TABLE *to,
{
copy_ptr->do_copy(copy_ptr);
}
+ prev_insert_id= to->file->next_insert_id;
if ((error=to->file->ha_write_row((byte*) to->record[0])))
{
- if ((!ignore &&
- handle_duplicates != DUP_REPLACE) ||
- (error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE))
+ if (!ignore ||
+ to->file->is_fatal_error(error, HA_CHECK_DUP))
{
- if (error == HA_ERR_FOUND_DUPP_KEY)
+ if (!to->file->is_fatal_error(error, HA_CHECK_DUP))
{
uint key_nr= to->file->get_dup_key(error);
if ((int) key_nr >= 0)
@@ -6345,7 +6363,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
to->file->print_error(error,MYF(0));
break;
}
- to->file->restore_auto_increment();
+ to->file->restore_auto_increment(prev_insert_id);
delete_count++;
}
else
@@ -6379,6 +6397,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
free_io_cache(from);
*copied= found_count;
*deleted=delete_count;
+ to->file->ha_release_auto_increment();
if (to->file->ha_external_lock(thd,F_UNLCK))
error=1;
DBUG_RETURN(error > 0 ? -1 : 0);
@@ -6416,7 +6435,7 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, lex->create_list,
lex->key_list, 0, (ORDER *) 0,
- DUP_ERROR, 0, &lex->alter_info, do_send_ok));
+ 0, &lex->alter_info, do_send_ok));
}
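The copy loop in copy_data_between_tables() above now snapshots the handler's next_insert_id before each write and hands it back when an ignorable duplicate is skipped, so skipped rows no longer consume auto-increment values; ha_release_auto_increment() then returns any unused reserved values once the copy finishes. A condensed, illustrative sketch of the caller-side pattern (locals and error handling trimmed; not the patch's literal code):

    /* Sketch only: condensed from the loop in copy_data_between_tables(). */
    ulonglong prev_insert_id;
    while (!(error= info.read_record(&info)))
    {
      prev_insert_id= to->file->next_insert_id;          /* remember reservation */
      if ((error= to->file->ha_write_row(to->record[0])))
      {
        if (!ignore || to->file->is_fatal_error(error, HA_CHECK_DUP))
        {
          to->file->print_error(error, MYF(0));
          break;                                          /* real failure */
        }
        to->file->restore_auto_increment(prev_insert_id); /* row was skipped */
      }
    }
    to->file->ha_release_auto_increment();                /* give back unused values */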
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 0ea87f3dfe4..1837372c6c4 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -183,6 +183,15 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
!(tables= add_table_for_trigger(thd, thd->lex->spname)))
DBUG_RETURN(TRUE);
+ /*
+ We don't allow creating triggers on tables in the 'mysql' schema
+ */
+ if (create && !my_strcasecmp(system_charset_info, "mysql", tables->db))
+ {
+ my_error(ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
/* We should have only one table in table list. */
DBUG_ASSERT(tables->next_global == 0);
@@ -372,7 +381,9 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
/* We don't allow creation of several triggers of the same type yet */
if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time])
{
- my_message(ER_TRG_ALREADY_EXISTS, ER(ER_TRG_ALREADY_EXISTS), MYF(0));
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "multiple triggers with the same action time"
+ " and event for one table");
return 1;
}
@@ -733,7 +744,8 @@ bool Table_triggers_list::prepare_record1_accessors(TABLE *table)
QQ: it is supposed that it is ok to use this function for field
cloning...
*/
- if (!(*old_fld= (*fld)->new_field(&table->mem_root, table)))
+ if (!(*old_fld= (*fld)->new_field(&table->mem_root, table,
+ table == (*fld)->table)))
return 1;
(*old_fld)->move_field_offset((my_ptrdiff_t)(table->record[1] -
table->record[0]));
@@ -928,8 +940,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
save_db.str= thd->db;
save_db.length= thd->db_length;
- thd->db_length= strlen(db);
- thd->db= (char *) db;
+ thd->reset_db((char*) db, strlen(db));
while ((trg_create_str= it++))
{
trg_sql_mode= itm++;
@@ -1010,8 +1021,15 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
}
/*
- Let us bind Item_trigger_field objects representing access to fields
- in old/new versions of row in trigger to Field objects in table being
+ Gather all Item_trigger_field objects representing access to fields
+ in the old/new versions of the row in the trigger into lists containing
+ all such objects for the triggers with the same action and timing.
+ */
+ triggers->trigger_fields[lex.trg_chistics.event]
+ [lex.trg_chistics.action_time]=
+ (Item_trigger_field *)(lex.trg_table_fields.first);
+ /*
+ Also let us bind these objects to the Field objects in the table being
opened.
We ignore errors here, because if even something is wrong we still
@@ -1031,8 +1049,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
lex_end(&lex);
}
- thd->db= save_db.str;
- thd->db_length= save_db.length;
+ thd->reset_db(save_db.str, save_db.length);
thd->lex= old_lex;
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
@@ -1045,8 +1062,7 @@ err_with_lex_cleanup:
thd->lex= old_lex;
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
- thd->db= save_db.str;
- thd->db_length= save_db.length;
+ thd->reset_db(save_db.str, save_db.length);
DBUG_RETURN(1);
}
@@ -1527,6 +1543,44 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
/*
+ Mark the fields of the subject table which are read/set in its triggers
+ as such.
+
+ SYNOPSIS
+ mark_fields_used()
+ event Type of event whose triggers we are going to inspect
+
+ DESCRIPTION
+ This method marks the fields of the subject table which are read/set in
+ its triggers as such (by properly updating TABLE::read_set/write_set)
+ and thus informs the handler that values for these fields should be
+ retrieved/stored during statement execution.
+*/
+
+void Table_triggers_list::mark_fields_used(trg_event_type event)
+{
+ int action_time;
+ Item_trigger_field *trg_field;
+
+ for (action_time= 0; action_time < (int)TRG_ACTION_MAX; action_time++)
+ {
+ for (trg_field= trigger_fields[event][action_time]; trg_field;
+ trg_field= trg_field->next_trg_field)
+ {
+ /* We cannot mark fields which are not present in the table. */
+ if (trg_field->field_idx != (uint)-1)
+ {
+ bitmap_set_bit(table->read_set, trg_field->field_idx);
+ if (trg_field->get_settable_routine_parameter())
+ bitmap_set_bit(table->write_set, trg_field->field_idx);
+ }
+ }
+ }
+ table->file->column_bitmaps_signal();
+}
+
+
+/*
Trigger BUG#14090 compatibility hook
SYNOPSIS
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index bddfd8c1f0c..13a919c09ca 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -26,6 +26,11 @@ class Table_triggers_list: public Sql_alloc
/* Triggers as SPs grouped by event, action_time */
sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
+ Heads of the lists linking items for all fields used in triggers
+ grouped by event and action_time.
+ */
+ Item_trigger_field *trigger_fields[TRG_EVENT_MAX][TRG_ACTION_MAX];
+ /*
Copy of TABLE::Field array with field pointers set to TABLE::record[1]
buffer instead of TABLE::record[0] (used for OLD values in on UPDATE
trigger and DELETE trigger when it is called for REPLACE).
@@ -82,6 +87,7 @@ public:
record1_field(0), table(table_arg)
{
bzero((char *)bodies, sizeof(bodies));
+ bzero((char *)trigger_fields, sizeof(trigger_fields));
bzero((char *)&subject_table_grants, sizeof(subject_table_grants));
}
~Table_triggers_list();
@@ -119,6 +125,8 @@ public:
void set_table(TABLE *new_table);
+ void mark_fields_used(trg_event_type event);
+
friend class Item_trigger_field;
friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
TABLE_LIST *table);
@@ -132,10 +140,6 @@ private:
const char *db_name,
LEX_STRING *old_table_name,
LEX_STRING *new_table_name);
- friend void st_table::mark_columns_needed_for_insert(void);
- friend void st_table::mark_columns_needed_for_update(void);
- friend void st_table::mark_columns_needed_for_delete(void);
-
};
extern const LEX_STRING trg_action_time_type_names[];
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 4b9de6905fe..13302c2c3f7 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -57,7 +57,7 @@ static char *init_syms(udf_func *tmp, char *nm)
{
char *end;
- if (!((tmp->func= dlsym(tmp->dlhandle, tmp->name.str))))
+ if (!((tmp->func= (Udf_func_any) dlsym(tmp->dlhandle, tmp->name.str))))
return tmp->name.str;
end=strmov(nm,tmp->name.str);
@@ -65,18 +65,18 @@ static char *init_syms(udf_func *tmp, char *nm)
if (tmp->type == UDFTYPE_AGGREGATE)
{
(void)strmov(end, "_clear");
- if (!((tmp->func_clear= dlsym(tmp->dlhandle, nm))))
+ if (!((tmp->func_clear= (Udf_func_clear) dlsym(tmp->dlhandle, nm))))
return nm;
(void)strmov(end, "_add");
- if (!((tmp->func_add= dlsym(tmp->dlhandle, nm))))
+ if (!((tmp->func_add= (Udf_func_add) dlsym(tmp->dlhandle, nm))))
return nm;
}
(void) strmov(end,"_deinit");
- tmp->func_deinit= dlsym(tmp->dlhandle, nm);
+ tmp->func_deinit= (Udf_func_deinit) dlsym(tmp->dlhandle, nm);
(void) strmov(end,"_init");
- tmp->func_init= dlsym(tmp->dlhandle, nm);
+ tmp->func_init= (Udf_func_init) dlsym(tmp->dlhandle, nm);
/*
to prefent loading "udf" from, e.g. libc.so
@@ -114,7 +114,8 @@ void udf_init()
READ_RECORD read_record_info;
TABLE *table;
int error;
- DBUG_ENTER("udf_init");
+ DBUG_ENTER("ufd_init");
+ char db[]= "mysql"; /* Subject to case conversion, so it can't be a string constant */
if (initialized)
DBUG_VOID_RETURN;
@@ -135,13 +136,12 @@ void udf_init()
initialized = 1;
new_thd->thread_stack= (char*) &new_thd;
new_thd->store_globals();
- new_thd->db= my_strdup("mysql", MYF(0));
- new_thd->db_length=5;
+ new_thd->set_db(db, sizeof(db)-1);
bzero((gptr) &tables,sizeof(tables));
tables.alias= tables.table_name= (char*) "func";
tables.lock_type = TL_READ;
- tables.db=new_thd->db;
+ tables.db= db;
if (simple_open_n_lock_tables(new_thd, &tables))
{
diff --git a/sql/sql_udf.h b/sql/sql_udf.h
index d588572a762..21cf735f5ab 100644
--- a/sql/sql_udf.h
+++ b/sql/sql_udf.h
@@ -23,6 +23,15 @@
enum Item_udftype {UDFTYPE_FUNCTION=1,UDFTYPE_AGGREGATE};
+typedef void (*Udf_func_clear)(UDF_INIT *, uchar *, uchar *);
+typedef void (*Udf_func_add)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *);
+typedef void (*Udf_func_deinit)(UDF_INIT*);
+typedef my_bool (*Udf_func_init)(UDF_INIT *, UDF_ARGS *, char *);
+typedef void (*Udf_func_any)();
+typedef double (*Udf_func_double)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *);
+typedef longlong (*Udf_func_longlong)(UDF_INIT *, UDF_ARGS *, uchar *,
+ uchar *);
+
typedef struct st_udf_func
{
LEX_STRING name;
@@ -30,11 +39,11 @@ typedef struct st_udf_func
Item_udftype type;
char *dl;
void *dlhandle;
- void *func;
- void *func_init;
- void *func_deinit;
- void *func_clear;
- void *func_add;
+ Udf_func_any func;
+ Udf_func_init func_init;
+ Udf_func_deinit func_deinit;
+ Udf_func_clear func_clear;
+ Udf_func_add func_add;
ulong usage_count;
} udf_func;
@@ -70,13 +79,13 @@ class udf_handler :public Sql_alloc
void cleanup();
double val(my_bool *null_value)
{
+ is_null= 0;
if (get_arguments())
{
*null_value=1;
return 0.0;
}
- double (*func)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)=
- (double (*)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)) u_d->func;
+ Udf_func_double func= (Udf_func_double) u_d->func;
double tmp=func(&initid, &f_args, &is_null, &error);
if (is_null || error)
{
@@ -88,13 +97,13 @@ class udf_handler :public Sql_alloc
}
longlong val_int(my_bool *null_value)
{
+ is_null= 0;
if (get_arguments())
{
*null_value=1;
return LL(0);
}
- longlong (*func)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)=
- (longlong (*)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)) u_d->func;
+ Udf_func_longlong func= (Udf_func_longlong) u_d->func;
longlong tmp=func(&initid, &f_args, &is_null, &error);
if (is_null || error)
{
@@ -108,8 +117,7 @@ class udf_handler :public Sql_alloc
void clear()
{
is_null= 0;
- void (*func)(UDF_INIT *, uchar *, uchar *)=
- (void (*)(UDF_INIT *, uchar *, uchar *)) u_d->func_clear;
+ Udf_func_clear func= u_d->func_clear;
func(&initid, &is_null, &error);
}
void add(my_bool *null_value)
@@ -119,8 +127,7 @@ class udf_handler :public Sql_alloc
*null_value=1;
return;
}
- void (*func)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)=
- (void (*)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)) u_d->func_add;
+ Udf_func_add func= u_d->func_add;
func(&initid, &f_args, &is_null, &error);
*null_value= (my_bool) (is_null || error);
}
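The typed function pointers introduced above replace the old void* members, so udf_handler no longer needs ad-hoc casts at every call site. For orientation, a loadable function satisfying these signatures looks roughly like the sketch below; the name my_scale and its behaviour are made up for illustration, only the init/main/deinit calling convention follows the standard UDF interface:

    #include <mysql.h>      /* UDF_INIT, UDF_ARGS, my_bool */
    #include <string.h>

    extern "C" {

    /* dlsym()'ed by init_syms() and cast to Udf_func_init */
    my_bool my_scale_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
    {
      if (args->arg_count != 1)
      {
        strcpy(message, "my_scale() requires exactly one argument");
        return 1;                          /* abort the statement */
      }
      args->arg_type[0]= REAL_RESULT;      /* coerce the argument to double */
      initid->maybe_null= 1;
      return 0;
    }

    /* dlsym()'ed and cast to Udf_func_double */
    double my_scale(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error)
    {
      if (!args->args[0])                  /* NULL in, NULL out */
      {
        *is_null= 1;
        return 0.0;
      }
      return *((double*) args->args[0]) * 100.0;
    }

    /* dlsym()'ed and cast to Udf_func_deinit */
    void my_scale_deinit(UDF_INIT *initid)
    {
      /* nothing allocated in init, nothing to free */
    }

    } /* extern "C" */

After building it into a shared object, CREATE FUNCTION my_scale RETURNS REAL SONAME 'my_scale.so' would register it in mysql.func, the table udf_init() reads at server start.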
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index bf93f0d3bea..3e6a3944093 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -65,7 +65,7 @@ bool select_union::send_data(List<Item> &values)
if ((error= table->file->ha_write_row(table->record[0])))
{
/* create_myisam_from_heap will generate error if needed */
- if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE &&
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP) &&
create_myisam_from_heap(thd, table, &tmp_table_param, error, 1))
return 1;
}
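select_union::send_data() now asks the handler whether the write error is fatal instead of comparing against the two duplicate-key error codes directly, which keeps engine-specific knowledge in one place. A sketch of what such a predicate can look like (the real default lives in handler.h; the flag usage is inferred from the HA_CHECK_DUP calls visible in this patch):

    class handler
    {
    public:
      /* Sketch only: 'flags' lists the error classes the caller is prepared
         to ignore; everything else is treated as fatal. */
      virtual bool is_fatal_error(int error, uint flags)
      {
        if (!error ||
            ((flags & HA_CHECK_DUP_KEY) && error == HA_ERR_FOUND_DUPP_KEY) ||
            ((flags & HA_CHECK_DUP_UNIQUE) && error == HA_ERR_FOUND_DUPP_UNIQUE))
          return false;     /* caller may skip the row (INSERT/UPDATE IGNORE) */
        return true;        /* abort the statement */
      }
    };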
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index da529cc0070..1b97acbcbfe 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -135,7 +135,8 @@ int mysql_update(THD *thd,
SQL_SELECT *select;
READ_RECORD info;
SELECT_LEX *select_lex= &thd->lex->select_lex;
- bool need_reopen;
+ bool need_reopen;
+ ulonglong id;
DBUG_ENTER("mysql_update");
for ( ; ; )
@@ -458,7 +459,7 @@ int mysql_update(THD *thd,
can_compare_record= (!(table->file->ha_table_flags() &
HA_PARTIAL_COLUMN_READ) ||
bitmap_is_subset(table->write_set, table->read_set));
-
+
while (!(error=info.read_record(&info)) && !thd->killed)
{
if (!(select && select->skip_record()))
@@ -541,13 +542,14 @@ int mysql_update(THD *thd,
break;
}
}
- else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
+ else if (!ignore ||
+ table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
{
/*
- If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to
+ If (ignore && error is ignorable) we don't have to
do anything; otherwise...
*/
- if (error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
thd->fatal_error(); /* Other handler errors are fatal */
table->file->print_error(error,MYF(0));
error= 1;
@@ -675,6 +677,10 @@ int mysql_update(THD *thd,
thd->lock=0;
}
+ /* If LAST_INSERT_ID(X) was used, report X */
+ id= thd->arg_of_last_insert_id_function ?
+ thd->first_successful_insert_id_in_prev_stmt : 0;
+
if (error < 0)
{
char buff[STRING_BUFFER_USUAL_SIZE];
@@ -682,8 +688,7 @@ int mysql_update(THD *thd,
(ulong) thd->cuted_fields);
thd->row_count_func=
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
- send_ok(thd, (ulong) thd->row_count_func,
- thd->insert_id_used ? thd->insert_id() : 0L,buff);
+ send_ok(thd, (ulong) thd->row_count_func, id, buff);
DBUG_PRINT("info",("%d records updated",updated));
}
thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */
@@ -1422,13 +1427,14 @@ bool multi_update::send_data(List<Item> &not_used_values)
table->record[0])))
{
updated--;
- if (!ignore || error != HA_ERR_FOUND_DUPP_KEY)
+ if (!ignore ||
+ table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
{
/*
- If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to
+ If (ignore && the error is ignorable) we don't have to
do anything; otherwise...
*/
- if (error != HA_ERR_FOUND_DUPP_KEY)
+ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
thd->fatal_error(); /* Other handler errors are fatal */
table->file->print_error(error,MYF(0));
DBUG_RETURN(1);
@@ -1457,8 +1463,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
/* Write row, ignoring duplicated updates to a row */
if ((error= tmp_table->file->ha_write_row(tmp_table->record[0])))
{
- if (error != HA_ERR_FOUND_DUPP_KEY &&
- error != HA_ERR_FOUND_DUPP_UNIQUE &&
+ if (tmp_table->file->is_fatal_error(error, HA_CHECK_DUP) &&
create_myisam_from_heap(thd, tmp_table,
tmp_table_param + offset, error, 1))
{
@@ -1581,7 +1586,8 @@ int multi_update::do_updates(bool from_send_error)
if ((local_error=table->file->ha_update_row(table->record[1],
table->record[0])))
{
- if (!ignore || local_error != HA_ERR_FOUND_DUPP_KEY)
+ if (!ignore ||
+ table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
goto err;
}
updated++;
@@ -1632,6 +1638,7 @@ err2:
bool multi_update::send_eof()
{
char buff[STRING_BUFFER_USUAL_SIZE];
+ ulonglong id;
thd->proc_info="updating reference tables";
/* Does updates for the last n - 1 tables, returns 0 if ok */
@@ -1684,12 +1691,12 @@ bool multi_update::send_eof()
return TRUE;
}
-
+ id= thd->arg_of_last_insert_id_function ?
+ thd->first_successful_insert_id_in_prev_stmt : 0;
sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
(ulong) thd->cuted_fields);
thd->row_count_func=
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
- ::send_ok(thd, (ulong) thd->row_count_func,
- thd->insert_id_used ? thd->insert_id() : 0L,buff);
+ ::send_ok(thd, (ulong) thd->row_count_func, id, buff);
return FALSE;
}
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index d1e7ba80ecf..d24b247a22f 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -452,15 +452,15 @@ bool mysql_create_view(THD *thd,
*/
for (sl= select_lex; sl; sl= sl->next_select())
{
- char *db= view->db ? view->db : thd->db;
+ DBUG_ASSERT(view->db); /* Must be set in the parser */
List_iterator_fast<Item> it(sl->item_list);
Item *item;
- fill_effective_table_privileges(thd, &view->grant, db,
+ fill_effective_table_privileges(thd, &view->grant, view->db,
view->table_name);
while ((item= it++))
{
Item_field *fld;
- uint priv= (get_column_grant(thd, &view->grant, db,
+ uint priv= (get_column_grant(thd, &view->grant, view->db,
view->table_name, item->name) &
VIEW_ANY_ACL);
if ((fld= item->filed_for_view_update()))
@@ -643,8 +643,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
if (!parser->ok() || !is_equal(&view_type, parser->type()))
{
- my_error(ER_WRONG_OBJECT, MYF(0),
- (view->db ? view->db : thd->db), view->table_name, "VIEW");
+ my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->table_name, "VIEW");
DBUG_RETURN(-1);
}
@@ -997,6 +996,15 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table)
table->next_global= view_tables;
}
+#ifdef HAVE_ROW_BASED_REPLICATION
+ /*
+ If the view's body needs row-based binlogging (e.g. the VIEW is created
+ from SELECT UUID()), the top statement also needs it.
+ */
+ if (lex->binlog_row_based_if_mixed)
+ old_lex->binlog_row_based_if_mixed= TRUE;
+#endif
+
/*
If we are opening this view as part of implicit LOCK TABLES, then
this view serves as simple placeholder and we should not continue
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 0632e2298cd..9236820cd25 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1594,12 +1594,18 @@ sp_name:
}
| ident
{
+ THD *thd= YYTHD;
+ LEX_STRING db;
if (check_routine_name($1))
{
my_error(ER_SP_WRONG_NAME, MYF(0), $1.str);
YYABORT;
}
- $$= sp_name_current_db_new(YYTHD, $1);
+ if (thd->copy_db_to(&db.str, &db.length))
+ YYABORT;
+ $$= new sp_name(db, $1);
+ if ($$)
+ $$->init_qname(YYTHD);
}
;
@@ -3184,14 +3190,26 @@ create2:
| LIKE table_ident
{
LEX *lex=Lex;
+ THD *thd= lex->thd;
if (!(lex->like_name= $2))
YYABORT;
+ if ($2->db.str == NULL &&
+ thd->copy_db_to(&($2->db.str), &($2->db.length)))
+ {
+ YYABORT;
+ }
}
| '(' LIKE table_ident ')'
{
LEX *lex=Lex;
+ THD *thd= lex->thd;
if (!(lex->like_name= $3))
YYABORT;
+ if ($3->db.str == NULL &&
+ thd->copy_db_to(&($3->db.str), &($3->db.length)))
+ {
+ YYABORT;
+ }
}
;
@@ -3716,14 +3734,15 @@ sub_part_definition:
{
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
- partition_element *p_elem= new partition_element();
- if (!p_elem ||
- part_info->current_partition->subpartitions.push_back(p_elem))
+ partition_element *curr_part= part_info->current_partition;
+ partition_element *sub_p_elem= new partition_element(curr_part);
+ if (!sub_p_elem ||
+ curr_part->subpartitions.push_back(sub_p_elem))
{
mem_alloc_error(sizeof(partition_element));
YYABORT;
}
- part_info->curr_part_elem= p_elem;
+ part_info->curr_part_elem= sub_p_elem;
part_info->use_default_subpartitions= FALSE;
part_info->use_default_no_subpartitions= FALSE;
part_info->count_curr_subparts++;
@@ -3861,7 +3880,7 @@ create_table_option:
| MIN_ROWS opt_equal ulonglong_num { Lex->create_info.min_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MIN_ROWS;}
| AVG_ROW_LENGTH opt_equal ulong_num { Lex->create_info.avg_row_length=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AVG_ROW_LENGTH;}
| PASSWORD opt_equal TEXT_STRING_sys { Lex->create_info.password=$3.str; Lex->create_info.used_fields|= HA_CREATE_USED_PASSWORD; }
- | COMMENT_SYM opt_equal TEXT_STRING_sys { Lex->create_info.comment=$3.str; Lex->create_info.used_fields|= HA_CREATE_USED_COMMENT; }
+ | COMMENT_SYM opt_equal TEXT_STRING_sys { Lex->create_info.comment=$3; Lex->create_info.used_fields|= HA_CREATE_USED_COMMENT; }
| AUTO_INC opt_equal ulonglong_num { Lex->create_info.auto_increment_value=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AUTO;}
| PACK_KEYS_SYM opt_equal ulong_num
{
@@ -4640,8 +4659,10 @@ alter:
lex->key_list.empty();
lex->col_list.empty();
lex->select_lex.init_order();
- lex->select_lex.db=lex->name= 0;
+ lex->name= 0;
lex->like_name= 0;
+ lex->select_lex.db=
+ ((TABLE_LIST*) lex->select_lex.table_list.first)->db;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
lex->create_info.db_type= 0;
lex->create_info.default_table_charset= NULL;
@@ -4660,8 +4681,11 @@ alter:
opt_create_database_options
{
LEX *lex=Lex;
+ THD *thd= Lex->thd;
lex->sql_command=SQLCOM_ALTER_DB;
lex->name= $3;
+ if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL))
+ YYABORT;
}
| ALTER PROCEDURE sp_name
{
@@ -5096,14 +5120,20 @@ alter_list_item:
| RENAME opt_to table_ident
{
LEX *lex=Lex;
+ THD *thd= lex->thd;
lex->select_lex.db=$3->db.str;
- lex->name= $3->table.str;
+ if (lex->select_lex.db == NULL &&
+ thd->copy_db_to(&lex->select_lex.db, NULL))
+ {
+ YYABORT;
+ }
if (check_table_name($3->table.str,$3->table.length) ||
$3->db.str && check_db_name($3->db.str))
{
my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str);
YYABORT;
}
+ lex->name= $3->table.str;
lex->alter_info.flags|= ALTER_RENAME;
}
| CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate
@@ -5678,10 +5708,21 @@ select_option:
YYABORT;
Select->options|= OPTION_FOUND_ROWS;
}
- | SQL_NO_CACHE_SYM { Lex->safe_to_cache_query=0; }
+ | SQL_NO_CACHE_SYM
+ {
+ Lex->safe_to_cache_query=0;
+ Lex->select_lex.options&= ~OPTION_TO_QUERY_CACHE;
+ Lex->select_lex.sql_cache= SELECT_LEX::SQL_NO_CACHE;
+ }
| SQL_CACHE_SYM
{
- Lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
+ /* Honor this flag only if SQL_NO_CACHE wasn't specified. */
+ if (Lex->select_lex.sql_cache != SELECT_LEX::SQL_NO_CACHE)
+ {
+ Lex->safe_to_cache_query=1;
+ Lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
+ Lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE;
+ }
}
| ALL { Select->options|= SELECT_ALL; }
;
@@ -5725,8 +5766,8 @@ select_item:
YYABORT;
if ($4.str)
{
- $2->set_name($4.str, $4.length, system_charset_info);
$2->is_autogenerated_name= FALSE;
+ $2->set_name($4.str, $4.length, system_charset_info);
}
else if (!$2->name) {
char *str = $1;
@@ -6118,7 +6159,10 @@ simple_expr:
Lex->safe_to_cache_query=0;
}
| CURRENT_USER optional_braces
- { $$= create_func_current_user(); }
+ {
+ $$= new Item_func_current_user(Lex->current_context());
+ Lex->safe_to_cache_query= 0;
+ }
| DATE_ADD_INTERVAL '(' expr ',' interval_expr interval ')'
{ $$= new Item_date_add_interval($3,$5,$6,0); }
| DATE_SUB_INTERVAL '(' expr ',' interval_expr interval ')'
@@ -6374,7 +6418,7 @@ simple_expr:
if (udf->type == UDFTYPE_AGGREGATE)
Select->in_sum_expr--;
- Lex->binlog_row_based_if_mixed= 1;
+ Lex->binlog_row_based_if_mixed= TRUE;
switch (udf->returns) {
case STRING_RESULT:
@@ -6449,7 +6493,13 @@ simple_expr:
#endif /* HAVE_DLOPEN */
{
LEX *lex= Lex;
- sp_name *name= sp_name_current_db_new(YYTHD, $1);
+ THD *thd= lex->thd;
+ LEX_STRING db;
+ if (thd->copy_db_to(&db.str, &db.length))
+ YYABORT;
+ sp_name *name= new sp_name(db, $1);
+ if (name)
+ name->init_qname(thd);
sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION);
if ($4)
@@ -6471,7 +6521,7 @@ simple_expr:
| UNIX_TIMESTAMP '(' expr ')'
{ $$= new Item_func_unix_timestamp($3); }
| USER '(' ')'
- { $$= new Item_func_user(FALSE); Lex->safe_to_cache_query=0; }
+ { $$= new Item_func_user(); Lex->safe_to_cache_query=0; }
| UTC_DATE_SYM optional_braces
{ $$= new Item_func_curdate_utc(); Lex->safe_to_cache_query=0;}
| UTC_TIME_SYM optional_braces
@@ -6605,8 +6655,8 @@ udf_expr:
{
if ($4.str)
{
- $2->set_name($4.str, $4.length, system_charset_info);
$2->is_autogenerated_name= FALSE;
+ $2->set_name($4.str, $4.length, system_charset_info);
}
else
$2->set_name($1, (uint) ($3 - $1), YYTHD->charset());
@@ -8038,6 +8088,7 @@ truncate:
LEX* lex= Lex;
lex->sql_command= SQLCOM_TRUNCATE;
lex->select_lex.options= 0;
+ lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
lex->select_lex.init_order();
}
;
@@ -8305,24 +8356,10 @@ show_param:
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_SHOW_GRANTS;
- THD *thd= lex->thd;
- Security_context *sctx= thd->security_ctx;
LEX_USER *curr_user;
- if (!(curr_user= (LEX_USER*) thd->alloc(sizeof(st_lex_user))))
+ if (!(curr_user= (LEX_USER*) lex->thd->alloc(sizeof(st_lex_user))))
YYABORT;
- curr_user->user.str= sctx->priv_user;
- curr_user->user.length= strlen(sctx->priv_user);
- if (*sctx->priv_host != 0)
- {
- curr_user->host.str= sctx->priv_host;
- curr_user->host.length= strlen(sctx->priv_host);
- }
- else
- {
- curr_user->host.str= (char *) "%";
- curr_user->host.length= 1;
- }
- curr_user->password=null_lex_str;
+ bzero(curr_user, sizeof(st_lex_user));
lex->grant_user= curr_user;
}
| GRANTS FOR_SYM user
@@ -9269,22 +9306,14 @@ user:
}
| CURRENT_USER optional_braces
{
- THD *thd= YYTHD;
- Security_context *sctx= thd->security_ctx;
- if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user))))
+ if (!($$=(LEX_USER*) YYTHD->alloc(sizeof(st_lex_user))))
YYABORT;
- $$->user.str= sctx->priv_user;
- $$->user.length= strlen(sctx->priv_user);
- if (*sctx->priv_host != 0)
- {
- $$->host.str= sctx->priv_host;
- $$->host.length= strlen(sctx->priv_host);
- }
- else
- {
- $$->host.str= (char *) "%";
- $$->host.length= 1;
- }
+ /*
+ An empty LEX_USER means CURRENT_USER; it will be handled later
+ by the get_current_user() function.
+ */
+ bzero($$, sizeof(LEX_USER));
};
/* Keyword that we allow for identifiers (except SP labels) */
@@ -10328,7 +10357,9 @@ grant_ident:
'*'
{
LEX *lex= Lex;
- lex->current_select->db= lex->thd->db;
+ THD *thd= lex->thd;
+ if (thd->copy_db_to(&lex->current_select->db, NULL))
+ YYABORT;
if (lex->grant == GLOBAL_ACLS)
lex->grant = DB_ACLS & ~GRANT_ACL;
else if (lex->columns.elements)
diff --git a/sql/structs.h b/sql/structs.h
index 38bb441fc03..83ae6cac032 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -250,3 +250,99 @@ typedef struct user_conn {
#define STATUS_UPDATED 16 /* Record is updated by formula */
#define STATUS_NULL_ROW 32 /* table->null_row is set */
#define STATUS_DELETED 64
+
+/*
+ Such an interval is "discrete": it is the set of
+ { auto_inc_interval_min + k * increment,
+ 0 <= k <= (auto_inc_interval_values-1) },
+ where "increment" is maintained separately by the user of this class (and is
+ currently only thd->variables.auto_increment_increment).
+ It mustn't derive from Sql_alloc, because SET INSERT_ID needs to
+ allocate memory which must stay allocated for use by the next statement.
+*/
+class Discrete_interval {
+private:
+ ulonglong interval_min;
+ ulonglong interval_values;
+ ulonglong interval_max; // excluded bound. Redundant.
+public:
+ Discrete_interval *next; // used when linked into Discrete_intervals_list
+ void replace(ulonglong start, ulonglong val, ulonglong incr)
+ {
+ interval_min= start;
+ interval_values= val;
+ interval_max= (val == ULONGLONG_MAX) ? val : start + val * incr;
+ }
+ Discrete_interval(ulonglong start, ulonglong val, ulonglong incr) :
+ next(NULL) { replace(start, val, incr); };
+ Discrete_interval() : next(NULL) { replace(0, 0, 0); };
+ ulonglong minimum() const { return interval_min; };
+ ulonglong values() const { return interval_values; };
+ ulonglong maximum() const { return interval_max; };
+ /*
+ If appending [3,5] to [1,2], we merge both into [1,5] (they should have the
+ same increment for that; the user of the class has to ensure it). That is
+ just a space optimization. Returns 0 if the merge succeeded.
+ */
+ bool merge_if_contiguous(ulonglong start, ulonglong val, ulonglong incr)
+ {
+ if (interval_max == start)
+ {
+ if (val == ULONGLONG_MAX)
+ {
+ interval_values= interval_max= val;
+ }
+ else
+ {
+ interval_values+= val;
+ interval_max= start + val * incr;
+ }
+ return 0;
+ }
+ return 1;
+ };
+};
+
+/* List of Discrete_interval objects */
+class Discrete_intervals_list {
+private:
+ Discrete_interval *head;
+ Discrete_interval *tail;
+ /*
+ When many intervals are provided at the beginning of the execution of a
+ statement (in a replication slave or SET INSERT_ID), "current" points to
+ the interval being consumed by the thread now (so "current" goes from
+ "head" to "tail" then to NULL).
+ */
+ Discrete_interval *current;
+ uint elements; // number of elements
+public:
+ Discrete_intervals_list() : head(NULL), current(NULL), elements(0) {};
+ void empty_no_free()
+ {
+ head= current= NULL;
+ elements= 0;
+ }
+ void empty()
+ {
+ for (Discrete_interval *i= head; i;)
+ {
+ Discrete_interval *next= i->next;
+ delete i;
+ i= next;
+ }
+ empty_no_free();
+ }
+ const Discrete_interval* get_next()
+ {
+ Discrete_interval *tmp= current;
+ if (current != NULL)
+ current= current->next;
+ return tmp;
+ }
+ ~Discrete_intervals_list() { empty(); };
+ bool append(ulonglong start, ulonglong val, ulonglong incr);
+ ulonglong minimum() const { return (head ? head->minimum() : 0); };
+ ulonglong maximum() const { return (head ? tail->maximum() : 0); };
+ uint nb_elements() const { return elements; }
+};
diff --git a/sql/table.cc b/sql/table.cc
index a96ca0da881..71d4f8df837 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -743,7 +743,9 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
int_length= uint2korr(head+274);
share->null_fields= uint2korr(head+282);
com_length= uint2korr(head+284);
- share->comment= strdup_root(&share->mem_root, (char*) head+47);
+ share->comment.length= (int) (head[46]);
+ share->comment.str= strmake_root(&share->mem_root, (char*) head+47,
+ share->comment.length);
DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d", interval_count,interval_parts, share->keys,n_length,int_length, com_length));
@@ -1456,7 +1458,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
Create a new field for the key part that matches the index
*/
field= key_part->field=field->new_field(&outparam->mem_root,
- outparam);
+ outparam, 0);
field->field_length= key_part->length;
}
}
@@ -3925,16 +3927,7 @@ void st_table::mark_auto_increment_column()
void st_table::mark_columns_needed_for_delete()
{
if (triggers)
- {
- if (triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
- triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER])
- {
- /* TODO: optimize to only add columns used by trigger */
- use_all_columns();
- return;
- }
- }
-
+ triggers->mark_fields_used(TRG_EVENT_DELETE);
if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
{
Field **reg_field;
@@ -3985,15 +3978,7 @@ void st_table::mark_columns_needed_for_update()
{
DBUG_ENTER("mark_columns_needed_for_update");
if (triggers)
- {
- if (triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE] ||
- triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_AFTER])
- {
- /* TODO: optimize to only add columns used by trigger */
- use_all_columns();
- DBUG_VOID_RETURN;
- }
- }
+ triggers->mark_fields_used(TRG_EVENT_UPDATE);
if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
{
/* Mark all used key columns for read */
@@ -4036,18 +4021,35 @@ void st_table::mark_columns_needed_for_insert()
{
if (triggers)
{
- if (triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_BEFORE] ||
- triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_AFTER])
- {
- /* TODO: optimize to only add columns used by trigger */
- use_all_columns();
- return;
- }
+ /*
+ We don't need to mark columns which are used by ON DELETE and
+ ON UPDATE triggers, which may be invoked in case of REPLACE or
+ INSERT ... ON DUPLICATE KEY UPDATE, since, before doing the actual
+ row replacement or update, write_record() will mark all table
+ fields as used.
+ */
+ triggers->mark_fields_used(TRG_EVENT_INSERT);
}
if (found_next_number_field)
mark_auto_increment_column();
}
+/*
+ Cleanup this table for re-execution.
+
+ SYNOPSIS
+ st_table_list::reinit_before_use()
+*/
+
+void st_table_list::reinit_before_use(THD * /* thd */)
+{
+ /*
+ Reset old pointers to TABLEs: they are not valid since the tables
+ were closed at the end of the previous prepare or execute call.
+ */
+ table= 0;
+}
+
/*****************************************************************************
** Instansiate templates
diff --git a/sql/table.h b/sql/table.h
index fad3c4b2b54..ee8dd7ac65e 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -134,7 +134,7 @@ typedef struct st_table_share
uint *blob_field; /* Index to blobs in Field arrray*/
byte *default_values; /* row with default values */
- char *comment; /* Comment about table */
+ LEX_STRING comment; /* Comment about table */
CHARSET_INFO *table_charset; /* Default charset of string fields */
MY_BITMAP all_set;
@@ -712,7 +712,8 @@ typedef struct st_table_list
thr_lock_type lock_type;
uint outer_join; /* Which join type */
uint shared; /* Used in multi-upd */
- uint32 db_length, table_name_length;
+ uint db_length;
+ uint32 table_name_length;
bool updatable; /* VIEW/TABLE can be updated now */
bool straight; /* optimize with prev table */
bool updating; /* for replicate-do/ignore table */
@@ -781,6 +782,11 @@ typedef struct st_table_list
Security_context *find_view_security_context(THD *thd);
bool prepare_view_securety_context(THD *thd);
#endif
+ /*
+ Cleanup for re-execution in a prepared statement or a stored
+ procedure.
+ */
+ void reinit_before_use(THD *thd);
private:
bool prep_check_option(THD *thd, uint8 check_opt_type);
diff --git a/sql/time.cc b/sql/time.cc
index ae776a32aab..0461f7723c6 100644
--- a/sql/time.cc
+++ b/sql/time.cc
@@ -749,6 +749,7 @@ void make_truncated_value_warning(THD *thd, const char *str_val,
ER_TRUNCATED_WRONG_VALUE, warn_buff);
}
+/* Daynumber from year 0 to 9999-12-31 */
#define MAX_DAY_NUMBER 3652424L
bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval)
@@ -804,7 +805,7 @@ bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval)
ltime->hour= (uint) (sec/3600);
daynr= calc_daynr(ltime->year,ltime->month,1) + days;
/* Day number from year 0 to 9999-12-31 */
- if ((ulonglong) daynr >= MAX_DAY_NUMBER)
+ if ((ulonglong) daynr > MAX_DAY_NUMBER)
goto invalid_date;
get_date_from_daynr((long) daynr, &ltime->year, &ltime->month,
&ltime->day);
@@ -815,7 +816,7 @@ bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval)
period= (calc_daynr(ltime->year,ltime->month,ltime->day) +
sign * (long) interval.day);
/* Daynumber from year 0 to 9999-12-31 */
- if ((ulong) period >= MAX_DAY_NUMBER)
+ if ((ulong) period > MAX_DAY_NUMBER)
goto invalid_date;
get_date_from_daynr((long) period,&ltime->year,&ltime->month,&ltime->day);
break;
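The two comparison changes in date_add_interval() above are off-by-one fixes: MAX_DAY_NUMBER (3652424) is itself the day number of 9999-12-31, so the last representable date must pass the range check and only strictly larger values are invalid. A quick sanity check, assuming calc_daynr() counts days from year 0 as elsewhere in this file:

    /* Illustrative, not part of the patch: the boundary date maps exactly to
       the limit, so a ">=" test would wrongly reject interval arithmetic that
       lands on 9999-12-31, while ">" accepts it. */
    DBUG_ASSERT(calc_daynr(9999, 12, 31) == MAX_DAY_NUMBER);   /* 3652424 */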
diff --git a/sql/tztime.cc b/sql/tztime.cc
index f8de9bb48aa..4f6542bd043 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1560,6 +1560,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
TABLE *table;
Tz_names_entry *tmp_tzname;
my_bool return_val= 1;
+ char db[]= "mysql";
int res;
DBUG_ENTER("my_tz_init");
@@ -1616,13 +1617,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
leap seconds shared by all time zones.
*/
- thd->db= my_strdup("mysql",MYF(0));
- thd->db_length= 5; // Safety
+ thd->set_db(db, sizeof(db)-1);
bzero((char*) &tables_buff, sizeof(TABLE_LIST));
tables_buff[0].alias= tables_buff[0].table_name=
(char*)"time_zone_leap_second";
tables_buff[0].lock_type= TL_READ;
- tables_buff[0].db= thd->db;
+ tables_buff[0].db= db;
/*
Fill TABLE_LIST for the rest of the time zone describing tables
and link it to first one.
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 42518e7b9b7..396ff4fba27 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -24,7 +24,6 @@
str is a (long) to record position where 0 is the first position.
*/
-#define USES_TYPES
#include "mysql_priv.h"
#include <m_ctype.h>
#include <assert.h>
@@ -77,7 +76,7 @@ bool mysql_create_frm(THD *thd, const char *file_name,
handler *db_file)
{
LEX_STRING str_db_type;
- uint reclength,info_length,screens,key_info_length,maxlength,i;
+ uint reclength, info_length, screens, key_info_length, maxlength, tmp_len, i;
ulong key_buff_length;
File file;
ulong filepos, data_offset;
@@ -170,10 +169,30 @@ bool mysql_create_frm(THD *thd, const char *file_name,
fileinfo[26]= (uchar) test((create_info->max_rows == 1) &&
(create_info->min_rows == 1) && (keys == 0));
int2store(fileinfo+28,key_info_length);
- strmake((char*) forminfo+47,create_info->comment ? create_info->comment : "",
- 60);
- forminfo[46]=(uchar) strlen((char*)forminfo+47); // Length of comment
+ tmp_len= system_charset_info->cset->charpos(system_charset_info,
+ create_info->comment.str,
+ create_info->comment.str +
+ create_info->comment.length, 60);
+ if (tmp_len < create_info->comment.length)
+ {
+ char buff[128];
+ (void) my_snprintf(buff, sizeof(buff), "Too long comment for table '%s'",
+ table);
+ if ((thd->variables.sql_mode &
+ (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)))
+ {
+ my_message(ER_UNKNOWN_ERROR, buff, MYF(0));
+ goto err;
+ }
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), buff);
+ create_info->comment.length= tmp_len;
+ }
+
+ strmake((char*) forminfo+47, create_info->comment.str ?
+ create_info->comment.str : "", create_info->comment.length);
+ forminfo[46]=(uchar) create_info->comment.length;
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (part_info)
{
@@ -182,6 +201,7 @@ bool mysql_create_frm(THD *thd, const char *file_name,
}
#endif
int2store(fileinfo+59,db_file->extra_rec_buf_length());
+
if (my_pwrite(file,(byte*) fileinfo,64,0L,MYF_RW) ||
my_pwrite(file,(byte*) keybuff,key_info_length,
(ulong) uint2korr(fileinfo+6),MYF_RW))
@@ -524,6 +544,27 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
create_field *field;
while ((field=it++))
{
+
+ uint tmp_len= system_charset_info->cset->charpos(system_charset_info,
+ field->comment.str,
+ field->comment.str +
+ field->comment.length, 255);
+ if (tmp_len < field->comment.length)
+ {
+ char buff[128];
+ (void) my_snprintf(buff,sizeof(buff), "Too long comment for field '%s'",
+ field->field_name);
+ if ((current_thd->variables.sql_mode &
+ (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)))
+ {
+ my_message(ER_UNKNOWN_ERROR, buff, MYF(0));
+ DBUG_RETURN(1);
+ }
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), buff);
+ field->comment.length= tmp_len;
+ }
+
totlength+= field->length;
com_length+= field->comment.length;
if (MTYP_TYPENR(field->unireg_check) == Field::NOEMPTY ||
diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc
index 1a2bb264ef9..d22d9372e0d 100644
--- a/storage/blackhole/ha_blackhole.cc
+++ b/storage/blackhole/ha_blackhole.cc
@@ -206,7 +206,7 @@ static int blackhole_init()
blackhole_hton.state= SHOW_OPTION_YES;
blackhole_hton.db_type= DB_TYPE_BLACKHOLE_DB;
blackhole_hton.create= blackhole_create_handler;
- blackhole_hton.flags= HTON_CAN_RECREATE | HTON_ALTER_CANNOT_CREATE;
+ blackhole_hton.flags= HTON_CAN_RECREATE;
return 0;
}
diff --git a/storage/heap/hp_test1.c b/storage/heap/hp_test1.c
index dd696528eb8..703b39b1e2d 100644
--- a/storage/heap/hp_test1.c
+++ b/storage/heap/hp_test1.c
@@ -44,6 +44,7 @@ int main(int argc, char **argv)
get_options(argc,argv);
bzero(&hp_create_info, sizeof(hp_create_info));
+ hp_create_info.max_table_size= 1024L*1024L;
keyinfo[0].keysegs=1;
keyinfo[0].seg=keyseg;
@@ -58,7 +59,7 @@ int main(int argc, char **argv)
bzero((gptr) flags,sizeof(flags));
printf("- Creating heap-file\n");
- if (heap_create(filename,1,keyinfo,30,(ulong) flag*100000l,10l,
+ if (heap_create(filename,1,keyinfo,30,(ulong) flag*100000L,10L,
&hp_create_info) ||
!(file= heap_open(filename, 2)))
goto err;
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index d91597e9138..91c04866b5a 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -1158,13 +1158,14 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
#ifdef HAVE_RTREE_KEYS
(keyinfo->flag & HA_SPATIAL) ?
rtree_find_first(info, key, info->lastkey, key_length,
- SEARCH_SAME) :
+ MBR_EQUAL | MBR_DATA) :
#endif
_mi_search(info,keyinfo,info->lastkey,key_length,
SEARCH_SAME, info->s->state.key_root[key]);
if (search_result)
{
- mi_check_print_error(param,"Record at: %10s Can't find key for index: %2d",
+ mi_check_print_error(param,"Record at: %10s "
+ "Can't find key for index: %2d",
llstr(start_recpos,llbuff),key+1);
if (error++ > MAXERR || !(param->testflag & T_VERBOSE))
goto err2;
diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c
index 22cbde278be..f8fad493a91 100644
--- a/storage/myisam/mi_create.c
+++ b/storage/myisam/mi_create.c
@@ -60,6 +60,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
my_off_t key_root[MI_MAX_POSSIBLE_KEY],key_del[MI_MAX_KEY_BLOCK_SIZE];
MI_CREATE_INFO tmp_create_info;
DBUG_ENTER("mi_create");
+ DBUG_PRINT("enter", ("keys: %u columns: %u uniques: %u flags: %u",
+ keys, columns, uniques, flags));
if (!ci)
{
@@ -482,6 +484,16 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
uniques * MI_UNIQUEDEF_SIZE +
(key_segs + unique_key_parts)*HA_KEYSEG_SIZE+
columns*MI_COLUMNDEF_SIZE);
+ DBUG_PRINT("info", ("info_length: %u", info_length));
+ /* There are only 16 bits for the total header length. */
+ if (info_length > 65535)
+ {
+ my_printf_error(0, "MyISAM table '%s' has too many columns and/or "
+ "indexes and/or unique constraints.",
+ MYF(0), name + dirname_length(name));
+ my_errno= HA_WRONG_CREATE_OPTION;
+ goto err;
+ }
bmove(share.state.header.file_version,(byte*) myisam_file_magic,4);
ci->old_options=options| (ci->old_options & HA_OPTION_TEMP_COMPRESS_RECORD ?
@@ -562,9 +574,21 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
{
char *iext= strrchr(ci->index_file_name, '.');
int have_iext= iext && !strcmp(iext, MI_NAME_IEXT);
-
- fn_format(filename, ci->index_file_name, "", MI_NAME_IEXT,
- MY_UNPACK_FILENAME| (have_iext ? MY_REPLACE_EXT :MY_APPEND_EXT));
+ if (options & HA_OPTION_TMP_TABLE)
+ {
+ char *path;
+ /* Chop off the table name; temporary tables use a generated name. */
+ if ((path= strrchr(ci->index_file_name, FN_LIBCHAR)))
+ *path= '\0';
+ fn_format(filename, name, ci->index_file_name, MI_NAME_IEXT,
+ MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_APPEND_EXT);
+ }
+ else
+ {
+ fn_format(filename, ci->index_file_name, "", MI_NAME_IEXT,
+ MY_UNPACK_FILENAME | (have_iext ? MY_REPLACE_EXT :
+ MY_APPEND_EXT));
+ }
fn_format(linkname, name, "", MI_NAME_IEXT,
MY_UNPACK_FILENAME|MY_APPEND_EXT);
linkname_ptr=linkname;
@@ -627,9 +651,22 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
char *dext= strrchr(ci->data_file_name, '.');
int have_dext= dext && !strcmp(dext, MI_NAME_DEXT);
- fn_format(filename, ci->data_file_name, "", MI_NAME_DEXT,
- MY_UNPACK_FILENAME |
- (have_dext ? MY_REPLACE_EXT : MY_APPEND_EXT));
+ if (options & HA_OPTION_TMP_TABLE)
+ {
+ char *path;
+ /* Chop off the table name; temporary tables use a generated name. */
+ if ((path= strrchr(ci->data_file_name, FN_LIBCHAR)))
+ *path= '\0';
+ fn_format(filename, name, ci->data_file_name, MI_NAME_DEXT,
+ MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_APPEND_EXT);
+ }
+ else
+ {
+ fn_format(filename, ci->data_file_name, "", MI_NAME_DEXT,
+ MY_UNPACK_FILENAME |
+ (have_dext ? MY_REPLACE_EXT : MY_APPEND_EXT));
+ }
+
fn_format(linkname, name, "",MI_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
linkname_ptr=linkname;
@@ -650,6 +687,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
errpos=3;
}
+ DBUG_PRINT("info", ("write state info and base info"));
if (mi_state_info_write(file, &share.state, 2) ||
mi_base_info_write(file, &share.base))
goto err;
@@ -663,6 +701,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
#endif
/* Write key and keyseg definitions */
+ DBUG_PRINT("info", ("write key and keyseg definitions"));
for (i=0 ; i < share.base.keys - uniques; i++)
{
uint sp_segs=(keydefs[i].flag & HA_SPATIAL) ? 2*SPDIMS : 0;
@@ -713,6 +752,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
}
/* Save unique definition */
+ DBUG_PRINT("info", ("write unique definitions"));
for (i=0 ; i < share.state.header.uniques ; i++)
{
HA_KEYSEG *keyseg_end;
@@ -743,6 +783,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
goto err;
}
}
+ DBUG_PRINT("info", ("write field definitions"));
for (i=0 ; i < share.base.fields ; i++)
if (mi_recinfo_write(file, &recinfo[i]))
goto err;
@@ -757,6 +798,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
#endif
/* Enlarge files */
+ DBUG_PRINT("info", ("enlarge to keystart: %lu", (ulong) share.base.keystart));
if (my_chsize(file,(ulong) share.base.keystart,0,MYF(0)))
goto err;
diff --git a/storage/myisam/mi_delete_table.c b/storage/myisam/mi_delete_table.c
index df0e9deb3ec..b72e97d3215 100644
--- a/storage/myisam/mi_delete_table.c
+++ b/storage/myisam/mi_delete_table.c
@@ -34,12 +34,24 @@ int mi_delete_table(const char *name)
#ifdef USE_RAID
{
MI_INFO *info;
- /* we use 'open_for_repair' to be able to delete a crashed table */
- if (!(info=mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR)))
- DBUG_RETURN(my_errno);
- raid_type = info->s->base.raid_type;
- raid_chunks = info->s->base.raid_chunks;
- mi_close(info);
+ /*
+ When built with RAID support, we need to determine if this table
+ makes use of the raid feature. If yes, we need to remove all raid
+ chunks. This is done with my_raid_delete(). Unfortunately it is
+ necessary to open the table just to check this. We use
+ 'open_for_repair' to be able to open even a crashed table. If even
+ this open fails, we assume no raid configuration for this table
+ and try to remove the normal data file only. This may however
+ leave the raid chunks behind.
+ */
+ if (!(info= mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR)))
+ raid_type= 0;
+ else
+ {
+ raid_type= info->s->base.raid_type;
+ raid_chunks= info->s->base.raid_chunks;
+ mi_close(info);
+ }
}
#ifdef EXTRA_DEBUG
check_table_is_closed(name,"delete");
diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c
index 9d76a1fb9a5..0487500ad33 100644
--- a/storage/myisam/mi_dynrec.c
+++ b/storage/myisam/mi_dynrec.c
@@ -1329,6 +1329,9 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
info->rec_cache.pos_in_file <= block_info.next_filepos &&
flush_io_cache(&info->rec_cache))
goto err;
+ /* A corrupted table can have wrong pointers. (Bug# 19835) */
+ if (block_info.next_filepos == HA_OFFSET_ERROR)
+ goto panic;
info->rec_cache.seek_not_done=1;
if ((b_type=_mi_get_block_info(&block_info,file,
block_info.next_filepos))
diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c
index e6f4d39ab49..a9a8cbacb4b 100644
--- a/storage/myisam/mi_rkey.c
+++ b/storage/myisam/mi_rkey.c
@@ -68,6 +68,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len,
if (fast_mi_readinfo(info))
goto err;
+
if (share->concurrent_insert)
rw_rdlock(&share->key_root_lock[inx]);
@@ -90,24 +91,35 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len,
case HA_KEY_ALG_BTREE:
default:
if (!_mi_search(info, keyinfo, key_buff, use_key_length,
- myisam_read_vec[search_flag], info->s->state.key_root[inx]))
+ myisam_read_vec[search_flag], info->s->state.key_root[inx]))
{
- while (info->lastpos >= info->state->data_file_length)
+ /*
+ If we are searching for an exact key (including the data pointer)
+ and this was added by a concurrent insert,
+ then the result is "key not found".
+ */
+ if ((search_flag == HA_READ_KEY_EXACT) &&
+ (info->lastpos >= info->state->data_file_length))
+ {
+ my_errno= HA_ERR_KEY_NOT_FOUND;
+ info->lastpos= HA_OFFSET_ERROR;
+ }
+ else while (info->lastpos >= info->state->data_file_length)
{
/*
Skip rows that are inserted by other threads since we got a lock
Note that this can only happen if we are not searching after an
exact key, because the keys are sorted according to position
*/
-
if (_mi_search_next(info, keyinfo, info->lastkey,
- info->lastkey_length,
- myisam_readnext_vec[search_flag],
- info->s->state.key_root[inx]))
+ info->lastkey_length,
+ myisam_readnext_vec[search_flag],
+ info->s->state.key_root[inx]))
break;
}
}
}
+
if (share->concurrent_insert)
rw_unlock(&share->key_root_lock[inx]);
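The new branch above encodes one rule: rows located at or beyond data_file_length were inserted concurrently after this reader took its lock. A standalone sketch of the decision, with hypothetical names (only the comparison against the visible end of the data file comes from the code above):

    /* Sketch of the decision only -- not the real mi_rkey() code path. */
    enum hit_state { HIT_FOUND, HIT_NOT_FOUND, HIT_SKIP_AND_CONTINUE };

    static enum hit_state classify_hit(int exact_search,
                                       my_off_t row_pos,
                                       my_off_t visible_end /* data_file_length */)
    {
      if (row_pos < visible_end)
        return HIT_FOUND;              /* row existed before our read lock */
      if (exact_search)
        return HIT_NOT_FOUND;          /* exact key can only match a concurrent insert */
      return HIT_SKIP_AND_CONTINUE;    /* inexact search: step to the next key instead */
    }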
diff --git a/storage/myisam/rt_index.c b/storage/myisam/rt_index.c
index 97554dca4e6..1806476dc39 100644
--- a/storage/myisam/rt_index.c
+++ b/storage/myisam/rt_index.c
@@ -183,9 +183,11 @@ int rtree_find_first(MI_INFO *info, uint keynr, uchar *key, uint key_length,
return -1;
}
- /* Save searched key */
- memcpy(info->first_mbr_key, key, keyinfo->keylength -
- info->s->base.rec_reflength);
+ /*
+ Save searched key, include data pointer.
+ The data pointer is required if the search_flag contains MBR_DATA.
+ */
+ memcpy(info->first_mbr_key, key, keyinfo->keylength);
info->last_rkey_length = key_length;
info->rtree_recursion_depth = -1;
diff --git a/storage/myisam/rt_mbr.c b/storage/myisam/rt_mbr.c
index c43daec2f7c..897862c1c9a 100644
--- a/storage/myisam/rt_mbr.c
+++ b/storage/myisam/rt_mbr.c
@@ -52,10 +52,14 @@
if (EQUAL_CMP(amin, amax, bmin, bmax)) \
return 1; \
} \
- else /* if (nextflag & MBR_DISJOINT) */ \
+ else if (nextflag & MBR_DISJOINT) \
{ \
if (DISJOINT_CMP(amin, amax, bmin, bmax)) \
return 1; \
+ }\
+ else /* if unknown comparison operator */ \
+ { \
+ DBUG_ASSERT(0); \
}
#define RT_CMP_KORR(type, korr_func, len, nextflag) \
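Previously any flag other than the ones tested fell through to the DISJOINT comparison; the change above makes an unknown operator fail loudly in debug builds instead of silently misclassifying. The same defensive shape as a plain function (sketch; only the last two branches of the macro are shown):

    /* Sketch of the defensive pattern, not the full RT_CMP macro. */
    static int rt_flag_matches(uint nextflag, int equal_hit, int disjoint_hit)
    {
      if (nextflag & MBR_EQUAL)
        return equal_hit;
      if (nextflag & MBR_DISJOINT)
        return disjoint_hit;
      DBUG_ASSERT(0);   /* unknown comparison operator: crash in debug builds */
      return 0;         /* release builds: report "no match" rather than a false hit */
    }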
diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp
index b17bb456bf0..b78b5912bec 100644
--- a/storage/ndb/include/kernel/AttributeHeader.hpp
+++ b/storage/ndb/include/kernel/AttributeHeader.hpp
@@ -39,12 +39,13 @@ public:
STATIC_CONST( RANGE_NO = 0xFFFB ); // Read range no (when batched ranges)
STATIC_CONST( ROW_SIZE = 0xFFFA );
- STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 );
+ STATIC_CONST( FRAGMENT_FIXED_MEMORY= 0xFFF9 );
STATIC_CONST( RECORDS_IN_RANGE = 0xFFF8 );
STATIC_CONST( DISK_REF = 0xFFF7 );
STATIC_CONST( ROWID = 0xFFF6 );
STATIC_CONST( ROW_GCI = 0xFFF5 );
+ STATIC_CONST( FRAGMENT_VARSIZED_MEMORY = 0xFFF4 );
// NOTE: in 5.1 ctors and init take size in bytes
diff --git a/storage/ndb/include/mgmapi/ndbd_exit_codes.h b/storage/ndb/include/mgmapi/ndbd_exit_codes.h
index b16f1a63a8d..79df36e7955 100644
--- a/storage/ndb/include/mgmapi/ndbd_exit_codes.h
+++ b/storage/ndb/include/mgmapi/ndbd_exit_codes.h
@@ -71,6 +71,7 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification;
#define NDBD_EXIT_INDEX_NOTINRANGE 2304
#define NDBD_EXIT_ARBIT_SHUTDOWN 2305
#define NDBD_EXIT_POINTER_NOTINRANGE 2306
+#define NDBD_EXIT_PARTITIONED_SHUTDOWN 2307
#define NDBD_EXIT_SR_OTHERNODEFAILED 2308
#define NDBD_EXIT_NODE_NOT_DEAD 2309
#define NDBD_EXIT_SR_REDOLOG 2310
diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp
index dcd03cdc467..07f11f6e78a 100644
--- a/storage/ndb/include/ndbapi/Ndb.hpp
+++ b/storage/ndb/include/ndbapi/Ndb.hpp
@@ -1553,6 +1553,7 @@ private:
const char* aCatalogName, const char* aSchemaName);
void connected(Uint32 block_reference);
+ void report_node_connected(Uint32 nodeId);
NdbTransaction* startTransactionLocal(Uint32 aPrio, Uint32 aFragmentId);
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index ea4a2a9ca29..35b0d927bda 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -525,7 +525,8 @@ public:
const char* getDefaultValue() const;
static const Column * FRAGMENT;
- static const Column * FRAGMENT_MEMORY;
+ static const Column * FRAGMENT_FIXED_MEMORY;
+ static const Column * FRAGMENT_VARSIZED_MEMORY;
static const Column * ROW_COUNT;
static const Column * COMMIT_COUNT;
static const Column * ROW_SIZE;
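FRAGMENT_MEMORY is split into two pseudo columns so fixed-size and var-size page memory can be read separately per fragment. A hedged usage sketch via the NDB API (error handling omitted; exact enum spellings vary between versions, so treat this as an outline rather than verified code):

    // Outline: scan a table and print the two per-fragment memory counters (bytes).
    #include <NdbApi.hpp>

    static void print_fragment_memory(Ndb* ndb, const char* tableName)
    {
      NdbTransaction* trans = ndb->startTransaction();
      NdbScanOperation* op = trans->getNdbScanOperation(tableName);
      op->readTuples(NdbOperation::LM_CommittedRead);
      NdbRecAttr* fixed = op->getValue(NdbDictionary::Column::FRAGMENT_FIXED_MEMORY);
      NdbRecAttr* varsz = op->getValue(NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY);
      trans->execute(NdbTransaction::NoCommit);
      while (op->nextResult(true) == 0)
        ndbout << "fixed=" << fixed->u_64_value()
               << " var=" << varsz->u_64_value() << endl;
      ndb->closeTransaction(trans);
    }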
diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 7304a46a278..a96d0de0560 100644
--- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -365,6 +365,8 @@ const GsnName SignalNames [] = {
,{ GSN_EVENT_SUBSCRIBE_REF, "EVENT_SUBSCRIBE_REF" }
,{ GSN_DUMP_STATE_ORD, "DUMP_STATE_ORD" }
+ ,{ GSN_NODE_START_REP, "NODE_START_REP" }
+
,{ GSN_START_INFOREQ, "START_INFOREQ" }
,{ GSN_START_INFOREF, "START_INFOREF" }
,{ GSN_START_INFOCONF, "START_INFOCONF" }
diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
index 867dc0d2efc..fc22118e113 100644
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -2,7 +2,7 @@ Next QMGR 1
Next NDBCNTR 1000
Next NDBFS 2000
Next DBACC 3002
-Next DBTUP 4013
+Next DBTUP 4024
Next DBLQH 5043
Next DBDICT 6007
Next DBDIH 7177
@@ -437,6 +437,8 @@ Drop Table/Index:
8036: Fail next index drop in TC
6006: Crash participant in create index
+4013: verify TUP tab descr before and after next DROP TABLE
+
System Restart:
---------------
@@ -484,3 +486,6 @@ Dbtup:
4019 - handleInsert - Alloc rowid error
4020 - handleInsert - Size change error
4021 - handleInsert - Out of disk space
+
+4022 - addTuxEntries - fail before add of first entry
+4023 - addTuxEntries - fail add of last entry (the entry for last index)
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 228dab57650..7c44fadfa62 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -2075,6 +2075,8 @@ private:
*
* XXX only table ops check BlockState
*/
+ struct DictLockType;
+ friend struct DictLockType;
struct DictLockType {
DictLockReq::LockType lockType;
@@ -2082,6 +2084,9 @@ private:
const char* text;
};
+ struct DictLockRecord;
+ friend struct DictLockRecord;
+
struct DictLockRecord {
DictLockReq req;
const DictLockType* lt;
diff --git a/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt b/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt
index 17f24119e9d..72e23ed15a5 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt
+++ b/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt
@@ -85,10 +85,14 @@ DIH/s
START_MECONF
DIH/s
-* sp7 - release DICT lock
+* (copy data, omitted)
-DIH/s
- DICT_UNLOCK_ORD
- DICT/m
+* SL_STARTED - release DICT lock
+
+CNTR/s
+ NODE_START_REP
+ DIH/s
+ DICT_UNLOCK_ORD
+ DICT/m
# vim: set et sw=4:
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index 46effed867f..9d9ea6af2f5 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -1609,6 +1609,9 @@ private:
void dump_replica_info();
+ // DIH specifics for execNODE_START_REP (sendDictUnlockOrd)
+ void exec_node_start_rep(Signal* signal);
+
/*
* Lock master DICT. Only current use is by starting node
* during NR. A pool of slave records is convenient anyway.
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index c265f54bf30..0595c018b2e 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -1387,24 +1387,6 @@ void Dbdih::execNDB_STTOR(Signal* signal)
}
ndbrequire(false);
break;
- case ZNDB_SPH7:
- jam();
- switch (typestart) {
- case NodeState::ST_INITIAL_START:
- case NodeState::ST_SYSTEM_RESTART:
- jam();
- ndbsttorry10Lab(signal, __LINE__);
- return;
- case NodeState::ST_NODE_RESTART:
- case NodeState::ST_INITIAL_NODE_RESTART:
- jam();
- sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart);
- c_dictLockSlavePtrI_nodeRestart = RNIL;
- ndbsttorry10Lab(signal, __LINE__);
- return;
- }
- ndbrequire(false);
- break;
default:
jam();
ndbsttorry10Lab(signal, __LINE__);
@@ -1413,6 +1395,27 @@ void Dbdih::execNDB_STTOR(Signal* signal)
}//Dbdih::execNDB_STTOR()
void
+Dbdih::exec_node_start_rep(Signal* signal)
+{
+ /*
+ * Send DICT_UNLOCK_ORD when this node is SL_STARTED.
+ *
+ * Sending it before (sp 7) conflicts with code which assumes
+ * SL_STARTING means we are in copy phase of NR.
+ *
+ * NodeState::starting.restartType is not supposed to be used
+ * when SL_STARTED. Also it seems NODE_START_REP can arrive twice.
+ *
+ * For these reasons there are no consistency checks and
+ * we rely on c_dictLockSlavePtrI_nodeRestart alone.
+ */
+ if (c_dictLockSlavePtrI_nodeRestart != RNIL) {
+ sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart);
+ c_dictLockSlavePtrI_nodeRestart = RNIL;
+ }
+}
+
+void
Dbdih::createMutexes(Signal * signal, Uint32 count){
Callback c = { safe_cast(&Dbdih::createMutex_done), count };
@@ -1636,6 +1639,7 @@ void Dbdih::nodeRestartPh2Lab(Signal* signal)
void Dbdih::recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret)
{
ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL);
+ ndbrequire(data != RNIL);
c_dictLockSlavePtrI_nodeRestart = data;
nodeRestartPh2Lab2(signal);
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index c1d4175833e..be52e06eb81 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -238,6 +238,7 @@ class Dbtup;
#define ZSCAN_MARKERS 18
#define ZOPERATION_EVENT_REP 19
#define ZPREP_DROP_TABLE 20
+#define ZENABLE_EXPAND_CHECK 21
/* ------------------------------------------------------------------------- */
/* NODE STATE DURING SYSTEM RESTART, VARIABLES CNODES_SR_STATE */
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 3890fb69b2e..695580d556c 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -416,6 +416,35 @@ void Dblqh::execCONTINUEB(Signal* signal)
checkDropTab(signal);
return;
break;
+ case ZENABLE_EXPAND_CHECK:
+ {
+ jam();
+ fragptr.i = signal->theData[1];
+ if (fragptr.i != RNIL)
+ {
+ jam();
+ c_redo_complete_fragments.getPtr(fragptr);
+ signal->theData[0] = fragptr.p->tabRef;
+ signal->theData[1] = fragptr.p->fragId;
+ sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
+
+ c_redo_complete_fragments.next(fragptr);
+ signal->theData[0] = ZENABLE_EXPAND_CHECK;
+ signal->theData[1] = fragptr.i;
+ sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }
+ else
+ {
+ jam();
+ c_redo_complete_fragments.remove();
+ StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
+ conf->startingNodeId = getOwnNodeId();
+ sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
+ StartRecConf::SignalLength, JBB);
+ return;
+ }
+ }
default:
ndbrequire(false);
break;
@@ -469,6 +498,7 @@ void Dblqh::execSTTOR(Signal* signal)
csignalKey = signal->theData[6];
#if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR
char *name;
+ FILE *out = 0;
#endif
switch (tstartPhase) {
case ZSTART_PHASE1:
@@ -480,8 +510,14 @@ void Dblqh::execSTTOR(Signal* signal)
sendsttorryLab(signal);
#if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR
- name = NdbConfig_SignalLogFileName(getOwnNodeId());
- tracenrout = new NdbOut(* new FileOutputStream(fopen(name, "w+")));
+#ifdef VM_TRACE
+ out = globalSignalLoggers.getOutputStream();
+#endif
+ if (out == 0) {
+ name = NdbConfig_SignalLogFileName(getOwnNodeId());
+ out = fopen(name, "a");
+ }
+ tracenrout = new NdbOut(* new FileOutputStream(out));
#endif
#ifdef ERROR_INSERT
@@ -15658,24 +15694,23 @@ void Dblqh::srFourthComp(Signal* signal)
} else if ((cstartType == NodeState::ST_NODE_RESTART) ||
(cstartType == NodeState::ST_SYSTEM_RESTART)) {
jam();
-
-
+ if(cstartType == NodeState::ST_SYSTEM_RESTART)
+ {
+ jam();
+ if (c_redo_complete_fragments.first(fragptr))
+ {
+ jam();
+ signal->theData[0] = ZENABLE_EXPAND_CHECK;
+ signal->theData[1] = fragptr.i;
+ sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }
+ }
StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
conf->startingNodeId = getOwnNodeId();
sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
- StartRecConf::SignalLength, JBB);
-
- if(cstartType == NodeState::ST_SYSTEM_RESTART){
- c_redo_complete_fragments.first(fragptr);
- while(fragptr.i != RNIL){
- signal->theData[0] = fragptr.p->tabRef;
- signal->theData[1] = fragptr.p->fragId;
- sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
- c_redo_complete_fragments.next(fragptr);
- }
- c_redo_complete_fragments.remove();
- }
+ StartRecConf::SignalLength, JBB);
} else {
ndbrequire(false);
}//if
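The rewrite above replaces an unbounded while loop over c_redo_complete_fragments with a CONTINUEB chain: each activation sends EXPANDCHECK2 for one fragment, reschedules itself with the next fragment pointer, and only sends START_RECCONF once the list is empty, so the scheduler is not blocked for the whole list. The generic shape of that pattern, with hypothetical names (sendSignal, GSN_CONTINUEB and JBB are the real kernel primitives):

    // Generic CONTINUEB batching pattern (sketch; Block, ZCONTINUE_WORK etc. are hypothetical).
    void Block::continue_work(Signal* signal)
    {
      Uint32 item = signal->theData[1];
      if (item != RNIL)
      {
        process_one_item(signal, item);          // bounded amount of work per activation
        signal->theData[0] = ZCONTINUE_WORK;     // reschedule ourselves for the next item
        signal->theData[1] = next_item(item);
        sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
        return;
      }
      send_completion_conf(signal);              // list exhausted: report completion once
    }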
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 9bc916c8c22..e19e43f6d47 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -254,6 +254,8 @@ inline const Uint32* ALIGN_WORD(const void* ptr)
#define ZSTORED_PROCEDURE_FREE 0xffff
#define ZMIN_PAGE_LIMIT_TUP_COMMITREQ 2
+#define ZSKIP_TUX_TRIGGERS 0x1 // flag for TUP_ABORTREQ
+
#endif
class Dbtup: public SimulatedBlock {
@@ -604,6 +606,7 @@ struct Fragrecord {
Uint32 currentPageRange;
Uint32 rootPageRange;
Uint32 noOfPages;
+ Uint32 noOfVarPages;
Uint32 noOfPagesToGrow;
DLList<Page>::Head emptyPrimPage; // allocated pages (not init)
@@ -2154,6 +2157,7 @@ private:
//------------------------------------------------------------------
//------------------------------------------------------------------
void tupkeyErrorLab(Signal* signal);
+ void do_tup_abortreq(Signal*, Uint32 flags);
//------------------------------------------------------------------
//------------------------------------------------------------------
@@ -2372,15 +2376,18 @@ private:
// Public methods
Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset);
Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset);
- void freeTabDescr(Uint32 retRef, Uint32 retNo);
+ void freeTabDescr(Uint32 retRef, Uint32 retNo, bool normal = true);
Uint32 getTabDescrWord(Uint32 index);
void setTabDescrWord(Uint32 index, Uint32 word);
// Private methods
Uint32 sizeOfReadFunction();
void removeTdArea(Uint32 tabDesRef, Uint32 list);
- void insertTdArea(Uint32 sizeOfChunk, Uint32 tabDesRef, Uint32 list);
- Uint32 itdaMergeTabDescr(Uint32 retRef, Uint32 retNo);
+ void insertTdArea(Uint32 tabDesRef, Uint32 list);
+ void itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal);
+#ifdef VM_TRACE
+ void verifytabdes();
+#endif
void seizeOpRec(OperationrecPtr& regOperPtr);
void seizeFragrecord(FragrecordPtr& regFragPtr);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
index aa55985c1f2..16c51f7aa6e 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
@@ -63,11 +63,16 @@ void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf)
*/
void Dbtup::execTUP_ABORTREQ(Signal* signal)
{
+ ljamEntry();
+ do_tup_abortreq(signal, 0);
+}
+
+void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
+{
OperationrecPtr regOperPtr;
FragrecordPtr regFragPtr;
TablerecPtr regTabPtr;
- ljamEntry();
regOperPtr.i = signal->theData[0];
c_operation_pool.getPtr(regOperPtr);
TransState trans_state= get_trans_state(regOperPtr.p);
@@ -91,7 +96,8 @@ void Dbtup::execTUP_ABORTREQ(Signal* signal)
if (get_tuple_state(regOperPtr.p) == TUPLE_PREPARED)
{
ljam();
- if (!regTabPtr.p->tuxCustomTriggers.isEmpty())
+ if (!regTabPtr.p->tuxCustomTriggers.isEmpty() &&
+ (flags & ZSKIP_TUX_TRIGGERS) == 0)
executeTuxAbortTriggers(signal,
regOperPtr.p,
regFragPtr.p,
@@ -103,7 +109,8 @@ void Dbtup::execTUP_ABORTREQ(Signal* signal)
ljam();
c_operation_pool.getPtr(loopOpPtr);
if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED &&
- !regTabPtr.p->tuxCustomTriggers.isEmpty()) {
+ !regTabPtr.p->tuxCustomTriggers.isEmpty() &&
+ (flags & ZSKIP_TUX_TRIGGERS) == 0) {
ljam();
executeTuxAbortTriggers(signal,
loopOpPtr.p,
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 3021799fd11..6cde3e51e5a 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -747,6 +747,20 @@ void Dbtup::execTUPKEYREQ(Signal* signal)
regFragPtr,
regTabPtr) != 0) {
jam();
+ /*
+ * TUP insert succeeded but add of TUX entries failed. All
+ * TUX changes have been rolled back at this point.
+ *
+ * We will abort via tupkeyErrorLab() as usual. This routine
+ * however resets the operation to ZREAD. The TUP_ABORTREQ
+ * arriving later cannot then undo the insert.
+ *
+ * Therefore we call TUP_ABORTREQ already now. Diskdata etc
+ * should be in memory and timeslicing cannot occur. We must
+ * skip TUX abort triggers since TUX is already aborted.
+ */
+ signal->theData[0] = operPtr.i;
+ do_tup_abortreq(signal, ZSKIP_TUX_TRIGGERS);
tupkeyErrorLab(signal);
return;
}
@@ -775,6 +789,11 @@ void Dbtup::execTUPKEYREQ(Signal* signal)
regFragPtr,
regTabPtr) != 0) {
jam();
+ /*
+ * See insert case.
+ */
+ signal->theData[0] = operPtr.i;
+ do_tup_abortreq(signal, ZSKIP_TUX_TRIGGERS);
tupkeyErrorLab(signal);
return;
}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 88845a6ef64..8db8f423817 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -857,6 +857,11 @@ void
Dbtup::execDROP_TAB_REQ(Signal* signal)
{
ljamEntry();
+ if (ERROR_INSERTED(4013)) {
+#ifdef VM_TRACE
+ verifytabdes();
+#endif
+ }
DropTabReq* req= (DropTabReq*)signal->getDataPtr();
TablerecPtr tabPtr;
@@ -1109,7 +1114,6 @@ Dbtup::drop_fragment_free_var_pages(Signal* signal)
}
releaseFragPages(fragPtr.p);
-
Uint32 i;
for(i= 0; i<MAX_FRAG_PER_NODE; i++)
if(tabPtr.p->fragrec[i] == fragPtr.i)
@@ -1152,7 +1156,6 @@ Dbtup::start_restore_lcp(Uint32 tableId, Uint32 fragId)
tabPtr.p->m_attributes[DD].m_no_of_fixsize = 0;
tabPtr.p->m_attributes[DD].m_no_of_varsize = 0;
}
-
void
Dbtup::complete_restore_lcp(Uint32 tableId, Uint32 fragId)
{
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index 90fdd8c69d7..82bac432545 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -351,6 +351,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr)
regFragPtr->rootPageRange = RNIL;
regFragPtr->currentPageRange = RNIL;
regFragPtr->noOfPages = 0;
+ regFragPtr->noOfVarPages = 0;
regFragPtr->noOfPagesToGrow = 2;
regFragPtr->nextStartRange = 0;
}//initFragRange()
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index 940ccf54ba7..677eff53559 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -1135,13 +1135,20 @@ Dbtup::read_pseudo(Uint32 attrId,
case AttributeHeader::FRAGMENT:
* outBuffer = fragptr.p->fragmentId;
return 1;
- case AttributeHeader::FRAGMENT_MEMORY:
- {
- Uint64 tmp= fragptr.p->noOfPages;
- tmp*= 32768;
- memcpy(outBuffer,&tmp,8);
- }
- return 2;
+ case AttributeHeader::FRAGMENT_FIXED_MEMORY:
+ {
+ Uint64 tmp= fragptr.p->noOfPages;
+ tmp*= 32768;
+ memcpy(outBuffer,&tmp,8);
+ }
+ return 2;
+ case AttributeHeader::FRAGMENT_VARSIZED_MEMORY:
+ {
+ Uint64 tmp= fragptr.p->noOfVarPages;
+ tmp*= 32768;
+ memcpy(outBuffer,&tmp,8);
+ }
+ return 2;
case AttributeHeader::ROW_SIZE:
* outBuffer = tabptr.p->m_offsets[MM].m_fix_header_size << 2;
return 1;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
index 3754942bb99..15a01e82d38 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
@@ -24,13 +24,15 @@
#define ljam() { jamLine(22000 + __LINE__); }
#define ljamEntry() { jamEntryLine(22000 + __LINE__); }
-/* **************************************************************** */
-/* *********** TABLE DESCRIPTOR MEMORY MANAGER ******************** */
-/* **************************************************************** */
-/* This module is used to allocate and deallocate table descriptor */
-/* memory attached to fragments (could be allocated per table */
-/* instead. Performs its task by a buddy algorithm. */
-/* **************************************************************** */
+/*
+ * TABLE DESCRIPTOR MEMORY MANAGER
+ *
+ * Each table has a descriptor which is a contiguous array of words.
+ * The descriptor is allocated from a global array using a buddy
+ * algorithm.  A free list exists for each power-of-2 size in words.
+ * Freeing a piece first merges it with any free right and left
+ * neighbours and then divides the result into free-list chunks.
+ */
Uint32
Dbtup::getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset)
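The rewritten comment above describes the buddy scheme in a few sentences. A self-contained illustration of the free-side logic under those rules (hypothetical code, not DBTUP: the real implementation works on the tableDescriptor word array and maintains header/trailer words in each chunk):

    // Illustration only: power-of-two free lists and the split loop used when freeing.
    #include <cassert>

    static const unsigned MIN_CHUNK = 16;            // smallest chunk kept on a free list

    static unsigned next_higher_two_log(unsigned n)  // smallest l with (1u << l) > n
    {
      unsigned l = 0;
      while ((1u << l) <= n)
        l++;
      return l;
    }

    // 'ref'/'words' describe a region being returned; merging with free neighbours
    // (the itdaMergeTabDescr step) is assumed to have happened already.
    static void free_region(unsigned ref, unsigned words,
                            void (*insert_free)(unsigned ref, unsigned list))
    {
      while (words >= MIN_CHUNK)
      {
        unsigned list  = next_higher_two_log(words) - 1;  // largest power of two that fits
        unsigned chunk = 1u << list;
        insert_free(ref, list);                           // put the chunk on free list 'list'
        ref   += chunk;
        words -= chunk;
      }
      assert(words == 0);   // sizes are multiples of MIN_CHUNK, so nothing is lost
    }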
@@ -59,7 +61,7 @@ Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset)
Uint32 reference = RNIL;
Uint32 allocSize = getTabDescrOffsets(regTabPtr, offset);
/* ---------------------------------------------------------------- */
-/* ALWAYS ALLOCATE A MULTIPLE OF 16 BYTES */
+/* ALWAYS ALLOCATE A MULTIPLE OF 16 WORDS */
/* ---------------------------------------------------------------- */
allocSize = (((allocSize - 1) >> 4) + 1) << 4;
Uint32 list = nextHigherTwoLog(allocSize - 1); /* CALCULATE WHICH LIST IT BELONGS TO */
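The corrected comment now says words rather than bytes: the requested descriptor size is rounded up to a multiple of 16 words before the free list is chosen. The rounding in isolation:

    // The rounding used above, as a standalone helper (illustration).
    static unsigned round_up_16_words(unsigned words)
    {
      return (((words - 1) >> 4) + 1) << 4;   // 37 -> 48, 48 -> 48, 49 -> 64
    }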
@@ -72,9 +74,9 @@ Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset)
Uint32 retNo = (1 << i) - allocSize; /* CALCULATE THE DIFFERENCE */
if (retNo >= ZTD_FREE_SIZE) {
ljam();
- Uint32 retRef = reference + allocSize; /* SET THE RETURN POINTER */
- retNo = itdaMergeTabDescr(retRef, retNo); /* MERGE WITH POSSIBLE RIGHT NEIGHBOURS */
- freeTabDescr(retRef, retNo); /* RETURN UNUSED TD SPACE TO THE TD AREA */
+ // return unused words, of course without attempting left merge
+ Uint32 retRef = reference + allocSize;
+ freeTabDescr(retRef, retNo, false);
} else {
ljam();
allocSize = 1 << i;
@@ -100,17 +102,19 @@ Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset)
}//if
}//Dbtup::allocTabDescr()
-void Dbtup::freeTabDescr(Uint32 retRef, Uint32 retNo)
+void Dbtup::freeTabDescr(Uint32 retRef, Uint32 retNo, bool normal)
{
+ itdaMergeTabDescr(retRef, retNo, normal); /* MERGE WITH POSSIBLE NEIGHBOURS */
while (retNo >= ZTD_FREE_SIZE) {
ljam();
Uint32 list = nextHigherTwoLog(retNo);
list--; /* RETURN TO NEXT LOWER LIST */
Uint32 sizeOfChunk = 1 << list;
- insertTdArea(sizeOfChunk, retRef, list);
+ insertTdArea(retRef, list);
retRef += sizeOfChunk;
retNo -= sizeOfChunk;
}//while
+ ndbassert(retNo == 0);
}//Dbtup::freeTabDescr()
Uint32
@@ -127,7 +131,7 @@ Dbtup::setTabDescrWord(Uint32 index, Uint32 word)
tableDescriptor[index].tabDescr = word;
}//Dbtup::setTabDescrWord()
-void Dbtup::insertTdArea(Uint32 sizeOfChunk, Uint32 tabDesRef, Uint32 list)
+void Dbtup::insertTdArea(Uint32 tabDesRef, Uint32 list)
{
ndbrequire(list < 16);
setTabDescrWord(tabDesRef + ZTD_FL_HEADER, ZTD_TYPE_FREE);
@@ -144,19 +148,14 @@ void Dbtup::insertTdArea(Uint32 sizeOfChunk, Uint32 tabDesRef, Uint32 list)
setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_SIZE, 1 << list);
}//Dbtup::insertTdArea()
-/* ---------------------------------------------------------------- */
-/* ----------------------- MERGE_TAB_DESCR ------------------------ */
-/* ---------------------------------------------------------------- */
-/* INPUT: TAB_DESCR_PTR POINTING AT THE CURRENT CHUNK */
-/* */
-/* SHORTNAME: MTD */
-/* -----------------------------------------------------------------*/
-Uint32 Dbtup::itdaMergeTabDescr(Uint32 retRef, Uint32 retNo)
+/*
+ * Merge to-be-removed chunk (which need not be initialized with header
+ * and trailer) with left and right buddies. The start point retRef
+ * moves to the left and the size retNo increases to match the new chunk.
+ */
+void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal)
{
- /* THE SIZE OF THE PART TO MERGE MUST BE OF THE SAME SIZE AS THE INSERTED PART */
- /* THIS IS TRUE EITHER IF ONE PART HAS THE SAME SIZE OR THE SUM OF BOTH PARTS */
- /* TOGETHER HAS THE SAME SIZE AS THE PART TO BE INSERTED */
- /* FIND THE SIZES OF THE PARTS TO THE RIGHT OF THE PART TO BE REINSERTED */
+ // merge right
while ((retRef + retNo) < cnoOfTabDescrRec) {
ljam();
Uint32 tabDesRef = retRef + retNo;
@@ -170,11 +169,28 @@ Uint32 Dbtup::itdaMergeTabDescr(Uint32 retRef, Uint32 retNo)
removeTdArea(tabDesRef, list);
} else {
ljam();
- return retNo;
- }//if
- }//while
- ndbrequire((retRef + retNo) == cnoOfTabDescrRec);
- return retNo;
+ break;
+ }
+ }
+ // merge left
+ const bool mergeLeft = normal;
+ while (mergeLeft && retRef > 0) {
+ ljam();
+ Uint32 trailerWord = getTabDescrWord(retRef - ZTD_TR_TYPE);
+ if (trailerWord == ZTD_TYPE_FREE) {
+ ljam();
+ Uint32 sizeOfMergedPart = getTabDescrWord(retRef - ZTD_TR_SIZE);
+ ndbrequire(retRef >= sizeOfMergedPart);
+ retRef -= sizeOfMergedPart;
+ retNo += sizeOfMergedPart;
+ Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1);
+ removeTdArea(retRef, list);
+ } else {
+ ljam();
+ break;
+ }
+ }
+ ndbrequire((retRef + retNo) <= cnoOfTabDescrRec);
}//Dbtup::itdaMergeTabDescr()
/* ---------------------------------------------------------------- */
@@ -210,3 +226,94 @@ void Dbtup::removeTdArea(Uint32 tabDesRef, Uint32 list)
setTabDescrWord(tabDescrPrevPtr + ZTD_FL_NEXT, tabDescrNextPtr);
}//if
}//Dbtup::removeTdArea()
+
+#ifdef VM_TRACE
+void
+Dbtup::verifytabdes()
+{
+ struct WordType {
+ short fl; // free list 0-15
+ short ti; // table id
+ WordType() : fl(-1), ti(-1) {}
+ };
+ WordType* wt = new WordType [cnoOfTabDescrRec];
+ uint free_frags = 0;
+ // free lists
+ {
+ for (uint i = 0; i < 16; i++) {
+ Uint32 desc2 = RNIL;
+ Uint32 desc = cfreeTdList[i];
+ while (desc != RNIL) {
+ const Uint32 size = (1 << i);
+ ndbrequire(size >= ZTD_FREE_SIZE);
+ ndbrequire(desc + size <= cnoOfTabDescrRec);
+ { Uint32 index = desc + ZTD_FL_HEADER;
+ ndbrequire(tableDescriptor[index].tabDescr == ZTD_TYPE_FREE);
+ }
+ { Uint32 index = desc + ZTD_FL_SIZE;
+ ndbrequire(tableDescriptor[index].tabDescr == size);
+ }
+ { Uint32 index = desc + size - ZTD_TR_TYPE;
+ ndbrequire(tableDescriptor[index].tabDescr == ZTD_TYPE_FREE);
+ }
+ { Uint32 index = desc + size - ZTD_TR_SIZE;
+ ndbrequire(tableDescriptor[index].tabDescr == size);
+ }
+ { Uint32 index = desc + ZTD_FL_PREV;
+ ndbrequire(tableDescriptor[index].tabDescr == desc2);
+ }
+ for (uint j = 0; j < size; j++) {
+ ndbrequire(wt[desc + j].fl == -1);
+ wt[desc + j].fl = i;
+ }
+ desc2 = desc;
+ desc = tableDescriptor[desc + ZTD_FL_NEXT].tabDescr;
+ free_frags++;
+ }
+ }
+ }
+ // tables
+ {
+ for (uint i = 0; i < cnoOfTablerec; i++) {
+ TablerecPtr ptr;
+ ptr.i = i;
+ ptrAss(ptr, tablerec);
+ if (ptr.p->tableStatus == DEFINED) {
+ Uint32 offset[10];
+ const Uint32 alloc = getTabDescrOffsets(ptr.p, offset);
+ const Uint32 desc = ptr.p->readKeyArray - offset[3];
+ Uint32 size = alloc;
+ if (size % ZTD_FREE_SIZE != 0)
+ size += ZTD_FREE_SIZE - size % ZTD_FREE_SIZE;
+ ndbrequire(desc + size <= cnoOfTabDescrRec);
+ { Uint32 index = desc + ZTD_FL_HEADER;
+ ndbrequire(tableDescriptor[index].tabDescr == ZTD_TYPE_NORMAL);
+ }
+ { Uint32 index = desc + ZTD_FL_SIZE;
+ ndbrequire(tableDescriptor[index].tabDescr == size);
+ }
+ { Uint32 index = desc + size - ZTD_TR_TYPE;
+ ndbrequire(tableDescriptor[index].tabDescr == ZTD_TYPE_NORMAL);
+ }
+ { Uint32 index = desc + size - ZTD_TR_SIZE;
+ ndbrequire(tableDescriptor[index].tabDescr == size);
+ }
+ for (uint j = 0; j < size; j++) {
+ ndbrequire(wt[desc + j].ti == -1);
+ wt[desc + j].ti = i;
+ }
+ }
+ }
+ }
+ // all words
+ {
+ for (uint i = 0; i < cnoOfTabDescrRec; i++) {
+ bool is_fl = wt[i].fl != -1;
+ bool is_ti = wt[i].ti != -1;
+ ndbrequire(is_fl != is_ti);
+ }
+ }
+ delete [] wt;
+ ndbout << "verifytabdes: frags=" << free_frags << endl;
+}
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
index 62cd1f06157..0e1b251d8a5 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
@@ -1111,6 +1111,12 @@ Dbtup::addTuxEntries(Signal* signal,
Operationrec* regOperPtr,
Tablerec* regTabPtr)
{
+ if (ERROR_INSERTED(4022)) {
+ ljam();
+ CLEAR_ERROR_INSERT_VALUE;
+ terrorCode = 9999;
+ return -1;
+ }
TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
const DLList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
TriggerPtr triggerPtr;
@@ -1120,6 +1126,14 @@ Dbtup::addTuxEntries(Signal* signal,
ljam();
req->indexId = triggerPtr.p->indexId;
req->errorCode = RNIL;
+ if (ERROR_INSERTED(4023) &&
+ ! triggerList.hasNext(triggerPtr)) {
+ ljam();
+ CLEAR_ERROR_INSERT_VALUE;
+ terrorCode = 9999;
+ failPtrI = triggerPtr.i;
+ goto fail;
+ }
EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
signal, TuxMaintReq::SignalLength);
ljamEntry();
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
index 52ab66b5c0e..5f6dd68956a 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
@@ -302,6 +302,7 @@ Dbtup::get_empty_var_page(Fragrecord* fragPtr)
Uint32 cnt;
allocConsPages(10, cnt, ptr.i);
+ fragPtr->noOfVarPages+= cnt;
if (unlikely(cnt == 0))
{
return RNIL;
diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
index 70c0fdfc988..de080237668 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
@@ -248,6 +248,7 @@ private:
void execAPI_FAILCONF(Signal* signal);
void execREAD_NODESREQ(Signal* signal);
void execSET_VAR_REQ(Signal* signal);
+ void execAPI_FAILREQ(Signal* signal);
void execREAD_NODESREF(Signal* signal);
void execREAD_NODESCONF(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
index 6ee24561b0a..8ec5e681045 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
@@ -81,6 +81,7 @@ Qmgr::Qmgr(Block_context& ctx)
addRecSignal(GSN_API_REGREQ, &Qmgr::execAPI_REGREQ);
addRecSignal(GSN_API_VERSION_REQ, &Qmgr::execAPI_VERSION_REQ);
addRecSignal(GSN_DISCONNECT_REP, &Qmgr::execDISCONNECT_REP);
+ addRecSignal(GSN_API_FAILREQ, &Qmgr::execAPI_FAILREQ);
addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF);
addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ);
addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ);
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 1eac369ec65..0da2de3b7a2 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -438,6 +438,7 @@ void Qmgr::execCONNECT_REP(Signal* signal)
void
Qmgr::execREAD_NODESCONF(Signal* signal)
{
+ jamEntry();
check_readnodes_reply(signal,
refToNode(signal->getSendersBlockRef()),
GSN_READ_NODESCONF);
@@ -446,6 +447,7 @@ Qmgr::execREAD_NODESCONF(Signal* signal)
void
Qmgr::execREAD_NODESREF(Signal* signal)
{
+ jamEntry();
check_readnodes_reply(signal,
refToNode(signal->getSendersBlockRef()),
GSN_READ_NODESREF);
@@ -907,9 +909,9 @@ retry:
char buf[255];
BaseString::snprintf(buf, sizeof(buf),
- "Partitioned cluster! check StartPartialTimeout, "
- " node %d thinks %d is president, "
- " I think president is: %d",
+ "check StartPartialTimeout, "
+ "node %d thinks %d is president, "
+ "I think president is: %d",
nodeId, president, cpresident);
ndbout_c(buf);
@@ -941,7 +943,7 @@ retry:
CRASH_INSERTION(932);
progError(__LINE__,
- NDBD_EXIT_ARBIT_SHUTDOWN,
+ NDBD_EXIT_PARTITIONED_SHUTDOWN,
buf);
ndbrequire(false);
@@ -2338,6 +2340,8 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo)
ndbrequire(failedNodePtr.p->failState == NORMAL);
failedNodePtr.p->failState = WAITING_FOR_FAILCONF1;
+ NodeReceiverGroup rg(QMGR, c_clusterNodes);
+ sendSignal(rg, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
@@ -2361,6 +2365,27 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo)
CloseComReqConf::SignalLength, JBA);
}//Qmgr::sendApiFailReq()
+void Qmgr::execAPI_FAILREQ(Signal* signal)
+{
+ jamEntry();
+ NodeRecPtr failedNodePtr;
+ failedNodePtr.i = signal->theData[0];
+ // signal->theData[1] == QMGR_REF
+ ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
+
+ ndbrequire(getNodeInfo(failedNodePtr.i).getType() != NodeInfo::DB);
+
+ // ignore if api not active
+ if (failedNodePtr.p->phase != ZAPI_ACTIVE)
+ return;
+
+ signal->theData[0] = NDB_LE_Disconnected;
+ signal->theData[1] = failedNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ node_failed(signal, failedNodePtr.i);
+}
+
void Qmgr::execAPI_FAILCONF(Signal* signal)
{
NodeRecPtr failedNodePtr;
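With the change above, an API node failure is no longer handled only by the local blocks: the detecting QMGR also broadcasts API_FAILREQ to the QMGR instance on every live data node, and the new execAPI_FAILREQ handler ignores the request unless the API node is still ZAPI_ACTIVE. The broadcast idiom in isolation (sketch; theData layout as used by sendApiFailReq above):

    // Sketch of the broadcast: one signal to the QMGR block in every known data node.
    NodeReceiverGroup rg(QMGR, c_clusterNodes);   // receiver group spanning the cluster
    signal->theData[0] = failedNodeNo;            // the failed API node
    signal->theData[1] = QMGR_REF;                // sending block reference (see execAPI_FAILREQ)
    sendSignal(rg, GSN_API_FAILREQ, signal, 2, JBA);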
@@ -2798,7 +2823,7 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode,
break;
case FailRep::ZPARTITIONED_CLUSTER:
{
- code = NDBD_EXIT_ARBIT_SHUTDOWN;
+ code = NDBD_EXIT_PARTITIONED_SHUTDOWN;
char buf1[100], buf2[100];
c_clusterNodes.getText(buf1);
if (signal->getLength()== FailRep::SignalLength + FailRep::ExtraLength &&
@@ -2809,16 +2834,14 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode,
part.assign(NdbNodeBitmask::Size, rep->partition);
part.getText(buf2);
BaseString::snprintf(extra, sizeof(extra),
- "Partitioned cluster!"
- " Our cluster: %s other cluster: %s",
+ "Our cluster: %s other cluster: %s",
buf1, buf2);
}
else
{
jam();
BaseString::snprintf(extra, sizeof(extra),
- "Partitioned cluster!"
- " Our cluster: %s ", buf1);
+ "Our cluster: %s", buf1);
}
msg = extra;
break;
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index 42666a9e5d9..2b746fdbdd8 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -1445,12 +1445,13 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr)
tabPtr.p->m_error = 0;
tabPtr.p->m_schemaVersion = RNIL;
tabPtr.p->m_state = Table::DEFINING;
- tabPtr.p->m_hasTriggerDefined[0] = 0;
- tabPtr.p->m_hasTriggerDefined[1] = 0;
- tabPtr.p->m_hasTriggerDefined[2] = 0;
- tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID;
- tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID;
- tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->m_drop_subbPtr.p = 0;
+ for (int j= 0; j < 3; j++)
+ {
+ tabPtr.p->m_hasTriggerDefined[j] = 0;
+ tabPtr.p->m_hasOutstandingTriggerReq[j] = 0;
+ tabPtr.p->m_triggerIds[j] = ILLEGAL_TRIGGER_ID;
+ }
c_tables.add(tabPtr);
@@ -2491,6 +2492,13 @@ Suma::execSUB_STOP_REQ(Signal* signal){
DBUG_VOID_RETURN;
}
+ if (tabPtr.p->m_drop_subbPtr.p != 0) {
+ jam();
+ DBUG_PRINT("error", ("table locked"));
+ sendSubStopRef(signal, 1420);
+ DBUG_VOID_RETURN;
+ }
+
DBUG_PRINT("info",("subscription: %u tableId: %u[i=%u] id: %u key: %u",
subPtr.i, subPtr.p->m_tableId, tabPtr.i,
subPtr.p->m_subscriptionId,subPtr.p->m_subscriptionKey));
@@ -2543,7 +2551,7 @@ Suma::execSUB_STOP_REQ(Signal* signal){
subPtr.p->m_senderRef = senderRef; // store ref to requestor
subPtr.p->m_senderData = senderData; // store ref to requestor
- tabPtr.p->m_drop_subbPtr= subbPtr;
+ tabPtr.p->m_drop_subbPtr = subbPtr;
if (subPtr.p->m_state == Subscription::DEFINED)
{
@@ -2560,6 +2568,7 @@ Suma::execSUB_STOP_REQ(Signal* signal){
tabPtr.p->m_tableId, tabPtr.p->n_subscribers));
tabPtr.p->checkRelease(*this);
sendSubStopComplete(signal, tabPtr.p->m_drop_subbPtr);
+ tabPtr.p->m_drop_subbPtr.p = 0;
}
else
{
@@ -2667,7 +2676,8 @@ Suma::reportAllSubscribers(Signal *signal,
{
SubTableData * data = (SubTableData*)signal->getDataPtrSend();
- if (table_event == NdbDictionary::Event::_TE_SUBSCRIBE)
+ if (table_event == NdbDictionary::Event::_TE_SUBSCRIBE &&
+ !c_startup.m_restart_server_node_id)
{
data->gci = m_last_complete_gci + 1;
data->tableId = subPtr.p->m_tableId;
@@ -2893,6 +2903,9 @@ Suma::Table::dropTrigger(Signal* signal,Suma& suma)
jam();
DBUG_ENTER("Suma::dropTrigger");
+ m_hasOutstandingTriggerReq[0] =
+ m_hasOutstandingTriggerReq[1] =
+ m_hasOutstandingTriggerReq[2] = 1;
for(Uint32 j = 0; j<3; j++){
jam();
suma.suma_ndbrequire(m_triggerIds[j] != ILLEGAL_TRIGGER_ID);
@@ -2971,14 +2984,18 @@ Suma::Table::runDropTrigger(Signal* signal,
suma.suma_ndbrequire(type < 3);
suma.suma_ndbrequire(m_triggerIds[type] == triggerId);
+ suma.suma_ndbrequire(m_hasTriggerDefined[type] > 0);
+ suma.suma_ndbrequire(m_hasOutstandingTriggerReq[type] == 1);
m_hasTriggerDefined[type]--;
+ m_hasOutstandingTriggerReq[type] = 0;
if (m_hasTriggerDefined[type] == 0)
{
jam();
m_triggerIds[type] = ILLEGAL_TRIGGER_ID;
}
- if( m_hasTriggerDefined[0] != m_hasTriggerDefined[1] ||
- m_hasTriggerDefined[0] != m_hasTriggerDefined[2])
+ if( m_hasOutstandingTriggerReq[0] ||
+ m_hasOutstandingTriggerReq[1] ||
+ m_hasOutstandingTriggerReq[2])
{
// more to come
jam();
@@ -2996,6 +3013,7 @@ Suma::Table::runDropTrigger(Signal* signal,
checkRelease(suma);
suma.sendSubStopComplete(signal, m_drop_subbPtr);
+ m_drop_subbPtr.p = 0;
}
void Suma::suma_ndbrequire(bool v) { ndbrequire(v); }
@@ -3550,13 +3568,17 @@ Suma::execDROP_TAB_CONF(Signal *signal)
DBUG_PRINT("info",("drop table id: %d[i=%u]", tableId, tabPtr.i));
tabPtr.p->m_state = Table::DROPPED;
- tabPtr.p->m_hasTriggerDefined[0] = 0;
- tabPtr.p->m_hasTriggerDefined[1] = 0;
- tabPtr.p->m_hasTriggerDefined[2] = 0;
- tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID;
- tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID;
- tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID;
-
+ for (int j= 0; j < 3; j++)
+ {
+ if (!tabPtr.p->m_hasOutstandingTriggerReq[j])
+ {
+ tabPtr.p->m_hasTriggerDefined[j] = 0;
+ tabPtr.p->m_hasOutstandingTriggerReq[j] = 0;
+ tabPtr.p->m_triggerIds[j] = ILLEGAL_TRIGGER_ID;
+ }
+ else
+ tabPtr.p->m_hasTriggerDefined[j] = 1;
+ }
if (senderRef == 0)
{
DBUG_VOID_RETURN;
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
index 51f5fa4a8c8..4408d6aff8d 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
@@ -301,7 +301,8 @@ public:
union { Uint32 m_tableId; Uint32 key; };
Uint32 m_schemaVersion;
- Uint32 m_hasTriggerDefined[3]; // Insert/Update/Delete
+ Uint8 m_hasTriggerDefined[3]; // Insert/Update/Delete
+ Uint8 m_hasOutstandingTriggerReq[3]; // Insert/Update/Delete
Uint32 m_triggerIds[3]; // Insert/Update/Delete
Uint32 m_error;
diff --git a/storage/ndb/src/kernel/error/TimeModule.cpp b/storage/ndb/src/kernel/error/TimeModule.cpp
index 4bd8e3daf99..c4e569e7221 100644
--- a/storage/ndb/src/kernel/error/TimeModule.cpp
+++ b/storage/ndb/src/kernel/error/TimeModule.cpp
@@ -22,7 +22,7 @@
static const char* cMonth[] = { "x", "January", "February", "Mars", "April", "May", "June",
"July", "August", "September", "October", "November", "December"};
-static const char* cDay[] = { "x", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday",
+static const char* cDay[] = { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday",
"Saturday", "Sunday"};
static const char* cHour[] = { "00","01","02","03","04","05","06","07","08","09","10","11","12",
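The day-name fix above follows the usual struct tm convention: tm_wday runs 0..6 with 0 = Sunday, so the lookup table must start at "Sunday" rather than a dummy entry. For illustration (standalone, not the TimeModule code):

    // Standalone illustration of the indexing assumption behind the fix.
    #include <ctime>

    static const char* day_name(int tm_wday)      // 0 = Sunday ... 6 = Saturday
    {
      static const char* names[] =
        { "Sunday", "Monday", "Tuesday", "Wednesday",
          "Thursday", "Friday", "Saturday" };
      return (tm_wday >= 0 && tm_wday < 7) ? names[tm_wday] : "?";
    }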
diff --git a/storage/ndb/src/kernel/error/ndbd_exit_codes.c b/storage/ndb/src/kernel/error/ndbd_exit_codes.c
index 172125c35a1..2c32c31a15f 100644
--- a/storage/ndb/src/kernel/error/ndbd_exit_codes.c
+++ b/storage/ndb/src/kernel/error/ndbd_exit_codes.c
@@ -54,6 +54,8 @@ static const ErrStruct errArray[] =
{NDBD_EXIT_ARBIT_SHUTDOWN, XAE, "Node lost connection to other nodes and "
"can not form a unpartitioned cluster, please investigate if there are "
"error(s) on other node(s)"},
+ {NDBD_EXIT_PARTITIONED_SHUTDOWN, XAE, "Partitioned cluster detected. "
+ "Please check if cluster is already running"},
{NDBD_EXIT_POINTER_NOTINRANGE, XIE, "Pointer too large"},
{NDBD_EXIT_SR_OTHERNODEFAILED, XRE, "Another node failed during system "
"restart, please investigate error(s) on other node(s)"},
diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
index 1de47197867..4e01038d343 100644
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
@@ -921,6 +921,15 @@ SimulatedBlock::execCONTINUE_FRAGMENTED(Signal * signal){
void
SimulatedBlock::execNODE_START_REP(Signal* signal)
{
+ // common stuff for all blocks
+
+ // block specific stuff by virtual method override (default empty)
+ exec_node_start_rep(signal);
+}
+
+void
+SimulatedBlock::exec_node_start_rep(Signal* signal)
+{
}
void
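execNODE_START_REP now uses a small template-method pattern: the base SimulatedBlock does whatever is common and then calls a virtual exec_node_start_rep() whose default body is empty, so only blocks that need a hook (DBDIH above) override it. The shape of the pattern, with hypothetical class names:

    // Hook pattern sketch (hypothetical names; Signal stands in for the kernel type).
    class Signal;

    class BaseBlock {
    public:
      void handle_node_start_rep(Signal* signal) {
        /* ...handling common to all blocks... */
        on_node_start_rep(signal);                // block-specific extension point
      }
    protected:
      virtual void on_node_start_rep(Signal*) {}  // default: do nothing
    };

    class DihLikeBlock : public BaseBlock {
    protected:
      virtual void on_node_start_rep(Signal* signal) {
        /* e.g. release the DICT lock taken during node restart */
        (void)signal;
      }
    };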
diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
index ab698f7826c..3e90b20705e 100644
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
@@ -446,7 +446,8 @@ private:
void execCONTINUE_FRAGMENTED(Signal* signal);
void execAPI_START_REP(Signal* signal);
void execNODE_START_REP(Signal* signal);
-
+ virtual void exec_node_start_rep(Signal* signal);
+
Uint32 c_fragmentIdCounter;
ArrayPool<FragmentInfo> c_fragmentInfoPool;
DLHashTable<FragmentInfo> c_fragmentInfoHash;
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp
index 63fdb73c49f..49815ae6c13 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp
@@ -396,6 +396,8 @@ ClusterMgr::execNF_COMPLETEREP(const Uint32 * theData){
void
ClusterMgr::reportConnected(NodeId nodeId){
+ DBUG_ENTER("ClusterMgr::reportConnected");
+ DBUG_PRINT("info", ("nodeId: %u", nodeId));
/**
* Ensure that we are sending heartbeat every 100 ms
* until we have got the first reply from NDB providing
@@ -421,6 +423,7 @@ ClusterMgr::reportConnected(NodeId nodeId){
theNode.nfCompleteRep = true;
theFacade.ReportNodeAlive(nodeId);
+ DBUG_VOID_RETURN;
}
void
diff --git a/storage/ndb/src/ndbapi/DictCache.cpp b/storage/ndb/src/ndbapi/DictCache.cpp
index 8a0bf2f8e8b..c06bb6fc62a 100644
--- a/storage/ndb/src/ndbapi/DictCache.cpp
+++ b/storage/ndb/src/ndbapi/DictCache.cpp
@@ -312,12 +312,15 @@ GlobalDictCache::invalidate_all()
if (vers->size())
{
TableVersion * ver = & vers->back();
- ver->m_impl->m_status = NdbDictionary::Object::Invalid;
- ver->m_status = DROPPED;
- if (ver->m_refCount == 0)
+ if (ver->m_status != RETREIVING)
{
- delete ver->m_impl;
- vers->erase(vers->size() - 1);
+ ver->m_impl->m_status = NdbDictionary::Object::Invalid;
+ ver->m_status = DROPPED;
+ if (ver->m_refCount == 0)
+ {
+ delete ver->m_impl;
+ vers->erase(vers->size() - 1);
+ }
}
}
curr = m_tableHash.getNext(curr);
diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp
index 5b0a9e9d330..5eddbc35665 100644
--- a/storage/ndb/src/ndbapi/Ndb.cpp
+++ b/storage/ndb/src/ndbapi/Ndb.cpp
@@ -1025,14 +1025,19 @@ int Ndb::initAutoIncrement()
setDatabaseName("sys");
setDatabaseSchemaName("def");
- m_sys_tab_0 = getDictionary()->getTableGlobal("SYSTAB_0");
+ m_sys_tab_0 = theDictionary->getTableGlobal("SYSTAB_0");
// Restore current name space
setDatabaseName(currentDb.c_str());
setDatabaseSchemaName(currentSchema.c_str());
+ if (m_sys_tab_0 == NULL) {
+ assert(theDictionary->m_error.code != 0);
+ theError.code = theDictionary->m_error.code;
+ return -1;
+ }
- return (m_sys_tab_0 == NULL);
+ return 0;
}
int
@@ -1043,19 +1048,19 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table,
Uint32 aTableId = table->m_id;
DBUG_PRINT("enter", ("table=%u value=%llu op=%u", aTableId, opValue, op));
- NdbTransaction* tConnection;
- NdbOperation* tOperation= 0; // Compiler warning if not initialized
+ NdbTransaction* tConnection = NULL;
+ NdbOperation* tOperation = NULL;
Uint64 tValue;
NdbRecAttr* tRecAttrResult;
- CHECK_STATUS_MACRO_ZERO;
+ CHECK_STATUS_MACRO;
- if (initAutoIncrement())
- goto error_return;
+ if (initAutoIncrement() == -1)
+ goto error_handler;
tConnection = this->startTransaction();
if (tConnection == NULL)
- goto error_return;
+ goto error_handler;
tOperation = tConnection->getNdbOperation(m_sys_tab_0);
if (tOperation == NULL)
@@ -1065,7 +1070,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table,
{
case 0:
tOperation->interpretedUpdateTuple();
- tOperation->equal("SYSKEY_0", aTableId );
+ tOperation->equal("SYSKEY_0", aTableId);
tOperation->incValue("NEXTID", opValue);
tRecAttrResult = tOperation->getValue("NEXTID");
@@ -1130,14 +1135,21 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table,
DBUG_RETURN(0);
- error_handler:
- theError.code = tConnection->theError.code;
- this->closeTransaction(tConnection);
- error_return:
+error_handler:
DBUG_PRINT("error", ("ndb=%d con=%d op=%d",
theError.code,
- tConnection ? tConnection->theError.code : -1,
- tOperation ? tOperation->theError.code : -1));
+ tConnection != NULL ? tConnection->theError.code : -1,
+ tOperation != NULL ? tOperation->theError.code : -1));
+
+ if (theError.code == 0 && tConnection != NULL)
+ theError.code = tConnection->theError.code;
+ if (theError.code == 0 && tOperation != NULL)
+ theError.code = tOperation->theError.code;
+ DBUG_ASSERT(theError.code != 0);
+
+ if (tConnection != NULL)
+ this->closeTransaction(tConnection);
+
DBUG_RETURN(-1);
}
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 1e33a843a42..b9c03f0b209 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -328,9 +328,14 @@ NdbColumnImpl::create_pseudo(const char * name){
col->m_impl.m_attrId = AttributeHeader::FRAGMENT;
col->m_impl.m_attrSize = 4;
col->m_impl.m_arraySize = 1;
- } else if(!strcmp(name, "NDB$FRAGMENT_MEMORY")){
+ } else if(!strcmp(name, "NDB$FRAGMENT_FIXED_MEMORY")){
col->setType(NdbDictionary::Column::Bigunsigned);
- col->m_impl.m_attrId = AttributeHeader::FRAGMENT_MEMORY;
+ col->m_impl.m_attrId = AttributeHeader::FRAGMENT_FIXED_MEMORY;
+ col->m_impl.m_attrSize = 8;
+ col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$FRAGMENT_VARSIZED_MEMORY")){
+ col->setType(NdbDictionary::Column::Bigunsigned);
+ col->m_impl.m_attrId = AttributeHeader::FRAGMENT_VARSIZED_MEMORY;
col->m_impl.m_attrSize = 8;
col->m_impl.m_arraySize = 1;
} else if(!strcmp(name, "NDB$ROW_COUNT")){
@@ -1316,7 +1321,8 @@ NdbDictionaryImpl::~NdbDictionaryImpl()
m_globalHash->lock();
if(--f_dictionary_count == 0){
delete NdbDictionary::Column::FRAGMENT;
- delete NdbDictionary::Column::FRAGMENT_MEMORY;
+ delete NdbDictionary::Column::FRAGMENT_FIXED_MEMORY;
+ delete NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY;
delete NdbDictionary::Column::ROW_COUNT;
delete NdbDictionary::Column::COMMIT_COUNT;
delete NdbDictionary::Column::ROW_SIZE;
@@ -1326,7 +1332,8 @@ NdbDictionaryImpl::~NdbDictionaryImpl()
delete NdbDictionary::Column::ROWID;
delete NdbDictionary::Column::ROW_GCI;
NdbDictionary::Column::FRAGMENT= 0;
- NdbDictionary::Column::FRAGMENT_MEMORY= 0;
+ NdbDictionary::Column::FRAGMENT_FIXED_MEMORY= 0;
+ NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY= 0;
NdbDictionary::Column::ROW_COUNT= 0;
NdbDictionary::Column::COMMIT_COUNT= 0;
NdbDictionary::Column::ROW_SIZE= 0;
@@ -1483,8 +1490,10 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb,
if(f_dictionary_count++ == 0){
NdbDictionary::Column::FRAGMENT=
NdbColumnImpl::create_pseudo("NDB$FRAGMENT");
- NdbDictionary::Column::FRAGMENT_MEMORY=
- NdbColumnImpl::create_pseudo("NDB$FRAGMENT_MEMORY");
+ NdbDictionary::Column::FRAGMENT_FIXED_MEMORY=
+ NdbColumnImpl::create_pseudo("NDB$FRAGMENT_FIXED_MEMORY");
+ NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY=
+ NdbColumnImpl::create_pseudo("NDB$FRAGMENT_VARSIZED_MEMORY");
NdbDictionary::Column::ROW_COUNT=
NdbColumnImpl::create_pseudo("NDB$ROW_COUNT");
NdbDictionary::Column::COMMIT_COUNT=
@@ -5041,7 +5050,8 @@ template class Vector<NdbTableImpl*>;
template class Vector<NdbColumnImpl*>;
const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT = 0;
-const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_MEMORY = 0;
+const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_FIXED_MEMORY = 0;
+const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY = 0;
const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0;
const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0;
const NdbDictionary::Column * NdbDictionary::Column::ROW_SIZE = 0;
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 271c317fcb4..06b0d7ea5b9 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -942,6 +942,7 @@ NdbEventBuffer::NdbEventBuffer(Ndb *ndb) :
{
#ifdef VM_TRACE
m_latest_command= "NdbEventBuffer::NdbEventBuffer";
+ m_flush_gci = 0;
#endif
if ((p_cond = NdbCondition_Create()) == NULL) {
@@ -1109,6 +1110,8 @@ NdbEventBuffer::flushIncompleteEvents(Uint64 gci)
/**
* Find min complete gci
*/
+ // called by user thread, so we need to lock the data
+ lock();
Uint32 i;
Uint32 sz= m_active_gci.size();
Gci_container* array = (Gci_container*)m_active_gci.getBase();
@@ -1127,6 +1130,10 @@ NdbEventBuffer::flushIncompleteEvents(Uint64 gci)
bzero(tmp, sizeof(Gci_container));
}
}
+#ifdef VM_TRACE
+ m_flush_gci = gci;
+#endif
+ unlock();
return 0;
}
@@ -1301,7 +1308,11 @@ operator<<(NdbOut& out, const Gci_container_pod& gci)
static
Gci_container*
-find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci)
+find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci
+#ifdef VM_TRACE
+ ,Uint64 flush_gci
+#endif
+ )
{
Uint32 pos = (gci & ACTIVE_GCI_MASK);
Gci_container *bucket= ((Gci_container*)active->getBase()) + pos;
@@ -1322,6 +1333,13 @@ find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci)
bzero(bucket, sizeof(Gci_container));
bucket->m_gci = gci;
bucket->m_gcp_complete_rep_count = ~(Uint32)0;
+#ifdef VM_TRACE
+ if (gci < flush_gci)
+ {
+ ndbout_c("received old gci %llu < flush gci %llu", gci, flush_gci);
+ assert(false);
+ }
+#endif
return bucket;
}
move_pos += ACTIVE_GCI_DIRECTORY_SIZE;
@@ -1336,7 +1354,16 @@ find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci)
bucket += ACTIVE_GCI_DIRECTORY_SIZE;
if(bucket->m_gci == gci)
+ {
+#ifdef VM_TRACE
+ if (gci < flush_gci)
+ {
+ ndbout_c("received old gci %llu < flush gci %llu", gci, flush_gci);
+ assert(false);
+ }
+#endif
return bucket;
+ }
} while(pos < size);
@@ -1346,14 +1373,22 @@ find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci)
inline
Gci_container*
-find_bucket(Vector<Gci_container_pod> * active, Uint64 gci)
+find_bucket(Vector<Gci_container_pod> * active, Uint64 gci
+#ifdef VM_TRACE
+ ,Uint64 flush_gci
+#endif
+ )
{
Uint32 pos = (gci & ACTIVE_GCI_MASK);
Gci_container *bucket= ((Gci_container*)active->getBase()) + pos;
if(likely(gci == bucket->m_gci))
return bucket;
- return find_bucket_chained(active,gci);
+ return find_bucket_chained(active,gci
+#ifdef VM_TRACE
+ , flush_gci
+#endif
+ );
}
static
@@ -1386,7 +1421,11 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
const Uint64 gci= rep->gci;
const Uint32 cnt= rep->gcp_complete_rep_count;
- Gci_container *bucket = find_bucket(&m_active_gci, gci);
+ Gci_container *bucket = find_bucket(&m_active_gci, gci
+#ifdef VM_TRACE
+ , m_flush_gci
+#endif
+ );
if (unlikely(bucket == 0))
{
@@ -1522,6 +1561,46 @@ NdbEventBuffer::complete_outof_order_gcis()
}
void
+NdbEventBuffer::report_node_connected(Uint32 node_id)
+{
+ NdbEventOperation* op= m_ndb->getEventOperation(0);
+ if (op == 0)
+ return;
+
+ DBUG_ENTER("NdbEventBuffer::report_node_connected");
+ SubTableData data;
+ LinearSectionPtr ptr[3];
+ bzero(&data, sizeof(data));
+ bzero(ptr, sizeof(ptr));
+
+ data.tableId = ~0;
+ data.operation = NdbDictionary::Event::_TE_ACTIVE;
+ data.req_nodeid = (Uint8)node_id;
+ data.ndbd_nodeid = (Uint8)node_id;
+ data.logType = SubTableData::LOG;
+ data.gci = m_latestGCI + 1;
+ /**
+ * Insert this event for each operation
+ */
+ {
+ // no need to lock()/unlock(), receive thread calls this
+ NdbEventOperationImpl* impl = &op->m_impl;
+ do if (!impl->m_node_bit_mask.isclear())
+ {
+ data.senderData = impl->m_oid;
+ insertDataL(impl, &data, ptr);
+ } while((impl = impl->m_next));
+ for (impl = m_dropped_ev_op; impl; impl = impl->m_next)
+ if (!impl->m_node_bit_mask.isclear())
+ {
+ data.senderData = impl->m_oid;
+ insertDataL(impl, &data, ptr);
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+void
NdbEventBuffer::report_node_failure(Uint32 node_id)
{
NdbEventOperation* op= m_ndb->getEventOperation(0);
@@ -1579,6 +1658,10 @@ NdbEventBuffer::completeClusterFailed()
data.logType = SubTableData::LOG;
data.gci = m_latestGCI + 1;
+#ifdef VM_TRACE
+ m_flush_gci = 0;
+#endif
+
/**
* Insert this event for each operation
*/
@@ -1712,7 +1795,11 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
if ( likely((Uint32)op->mi_type & (1 << (Uint32)sdata->operation)) )
{
- Gci_container* bucket= find_bucket(&m_active_gci, gci);
+ Gci_container* bucket= find_bucket(&m_active_gci, gci
+#ifdef VM_TRACE
+ , m_flush_gci
+#endif
+ );
DBUG_PRINT_EVENT("info", ("data insertion in eventId %d", op->m_eventId));
DBUG_PRINT_EVENT("info", ("gci=%d tab=%d op=%d node=%d",
@@ -1809,11 +1896,18 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
}
// merge is on so we do not report blob part events
if (! is_blob_event) {
- // report actual operation, not composite
+ // report actual operation and the composite
// there is no way to "fix" the flags for a composite op
// since the flags represent multiple ops on multiple PKs
- EventBufData_list::Gci_op g = { op, (1 << sdata->operation) };
- bucket->m_data.add_gci_op(g);
+ // XXX fix by doing merge at end of epoch (extra mem cost)
+ {
+ EventBufData_list::Gci_op g = { op, (1 << sdata->operation) };
+ bucket->m_data.add_gci_op(g);
+ }
+ {
+ EventBufData_list::Gci_op g = { op, (1 << data->sdata->operation) };
+ bucket->m_data.add_gci_op(g);
+ }
}
}
DBUG_RETURN_EVENT(0);
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
index c14ca83128f..561e79a137e 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
@@ -422,6 +422,7 @@ public:
void execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep);
void complete_outof_order_gcis();
+ void report_node_connected(Uint32 node_id);
void report_node_failure(Uint32 node_id);
void completeClusterFailed();
@@ -462,6 +463,7 @@ public:
#ifdef VM_TRACE
const char *m_latest_command;
+ Uint64 m_flush_gci;
#endif
Ndb *m_ndb;
diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
index 5852570a686..21caf8349b6 100644
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -1574,62 +1574,6 @@ NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend,
return -1;
}
- bool holdLock = false;
- if (theSCAN_TABREQ)
- {
- ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
- holdLock = ScanTabReq::getHoldLockFlag(req->requestInfo);
- }
-
- /**
- * When using locks, force close of scan directly
- */
- if (holdLock && theError.code == 0 &&
- (m_sent_receivers_count + m_conf_receivers_count + m_api_receivers_count))
- {
- NdbApiSignal tSignal(theNdb->theMyRef);
- tSignal.setSignal(GSN_SCAN_NEXTREQ);
-
- Uint32* theData = tSignal.getDataPtrSend();
- Uint64 transId = theNdbCon->theTransactionId;
- theData[0] = theNdbCon->theTCConPtr;
- theData[1] = 1;
- theData[2] = transId;
- theData[3] = (Uint32) (transId >> 32);
-
- tSignal.setLength(4);
- int ret = tp->sendSignal(&tSignal, nodeId);
- if (ret)
- {
- setErrorCode(4008);
- return -1;
- }
-
- /**
- * If no receiver is outstanding...
- * set it to 1 as execCLOSE_SCAN_REP resets it
- */
- m_sent_receivers_count = m_sent_receivers_count ? m_sent_receivers_count : 1;
-
- while(theError.code == 0 && (m_sent_receivers_count + m_conf_receivers_count))
- {
- int return_code = poll_guard->wait_scan(WAITFOR_SCAN_TIMEOUT, nodeId, forceSend);
- switch(return_code){
- case 0:
- break;
- case -1:
- setErrorCode(4008);
- case -2:
- m_api_receivers_count = 0;
- m_conf_receivers_count = 0;
- m_sent_receivers_count = 0;
- theNdbCon->theReleaseOnClose = true;
- return -1;
- }
- }
- return 0;
- }
-
/**
* Wait for outstanding
*/
diff --git a/storage/ndb/src/ndbapi/Ndbif.cpp b/storage/ndb/src/ndbapi/Ndbif.cpp
index ecaf6a3f435..0527744afe1 100644
--- a/storage/ndb/src/ndbapi/Ndbif.cpp
+++ b/storage/ndb/src/ndbapi/Ndbif.cpp
@@ -177,6 +177,7 @@ Ndb::executeMessage(void* NdbObject,
void Ndb::connected(Uint32 ref)
{
+// cluster connect, a_node == own reference
theMyRef= ref;
Uint32 tmpTheNode= refToNode(ref);
Uint64 tBlockNo= refToBlock(ref);
@@ -209,16 +210,30 @@ void Ndb::connected(Uint32 ref)
theNode= tmpTheNode; // flag that Ndb object is initialized
}
+void Ndb::report_node_connected(Uint32 nodeId)
+{
+ if (theEventBuffer)
+ {
+ // node connected
+ // eventOperations in the ndb object should be notified
+ theEventBuffer->report_node_connected(nodeId);
+ }
+}
+
void
Ndb::statusMessage(void* NdbObject, Uint32 a_node, bool alive, bool nfComplete)
{
DBUG_ENTER("Ndb::statusMessage");
+ DBUG_PRINT("info", ("a_node: %u alive: %u nfComplete: %u",
+ a_node, alive, nfComplete));
Ndb* tNdb = (Ndb*)NdbObject;
if (alive) {
if (nfComplete) {
+ // cluster connect, a_node == own reference
tNdb->connected(a_node);
DBUG_VOID_RETURN;
}//if
+ tNdb->report_node_connected(a_node);
} else {
if (nfComplete) {
tNdb->report_node_failure_completed(a_node);
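
Editor's note: for reference, a self-contained sketch of how the (alive, nfComplete) flag pair is decoded after this change. The interface below is a hypothetical stand-in for the Ndb object, and the last branch (node failure without completion) is assumed from surrounding code rather than shown in the hunk above:

    struct NodeEventSink {                 // hypothetical stand-in for Ndb
      virtual void connected(unsigned own_ref) = 0;
      virtual void report_node_connected(unsigned node_id) = 0;
      virtual void report_node_failure(unsigned node_id) = 0;          // assumed name
      virtual void report_node_failure_completed(unsigned node_id) = 0;
      virtual ~NodeEventSink() {}
    };

    void dispatch_status(NodeEventSink& ndb, unsigned a_node,
                         bool alive, bool nfComplete) {
      if (alive) {
        if (nfComplete)
          ndb.connected(a_node);                 // cluster connect, a_node == own reference
        else
          ndb.report_node_connected(a_node);     // data node (re)connected
      } else {
        if (nfComplete)
          ndb.report_node_failure_completed(a_node);
        else
          ndb.report_node_failure(a_node);       // assumed: plain failure report
      }
    }
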
diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp
index 15127953051..2f421271e91 100644
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp
@@ -794,6 +794,8 @@ TransporterFacade::connected()
void
TransporterFacade::ReportNodeDead(NodeId tNodeId)
{
+ DBUG_ENTER("TransporterFacade::ReportNodeDead");
+ DBUG_PRINT("enter",("nodeid= %d", tNodeId));
/**
* When a node fails we must report this to each Ndb object.
* The function that is used for communicating node failures is called.
@@ -810,6 +812,7 @@ TransporterFacade::ReportNodeDead(NodeId tNodeId)
(*RegPC) (obj, tNodeId, false, false);
}
}
+ DBUG_VOID_RETURN;
}
void
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index 486d78538f0..d0d26c19cfa 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -484,6 +484,8 @@ ErrorBundle ErrorCodes[] = {
{ 1418, DMEC, SE, "Subscription dropped, no new subscribers allowed" },
{ 1419, DMEC, SE, "Subscription already dropped" },
+ { 1420, DMEC, TR, "Subscriber manager busy with adding/removing a table" },
+
{ 4004, DMEC, AE, "Attribute name not found in the Table" },
{ 4100, DMEC, AE, "Status Error in NDB" },
diff --git a/storage/ndb/test/ndbapi/Makefile.am b/storage/ndb/test/ndbapi/Makefile.am
index b55acd2420d..dad3108f492 100644
--- a/storage/ndb/test/ndbapi/Makefile.am
+++ b/storage/ndb/test/ndbapi/Makefile.am
@@ -38,6 +38,10 @@ DbCreate DbAsyncGenerator \
testSRBank \
test_event_merge
+EXTRA_PROGRAMS = \
+ test_event \
+ test_event_merge \
+ test_event_multi_table
#flexTimedAsynch
#testBlobs
#flex_bench_mysql
diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp
index f6277484b04..2972123cbbe 100644
--- a/storage/ndb/test/ndbapi/testDict.cpp
+++ b/storage/ndb/test/ndbapi/testDict.cpp
@@ -233,6 +233,101 @@ int runCreateAndDrop(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
+int runCreateAndDropAtRandom(NDBT_Context* ctx, NDBT_Step* step)
+{
+ myRandom48Init(NdbTick_CurrentMillisecond());
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
+ int loops = ctx->getNumLoops();
+ int numTables = NDBT_Tables::getNumTables();
+ bool* tabList = new bool [ numTables ];
+ int tabCount;
+
+ {
+ for (int num = 0; num < numTables; num++) {
+ (void)pDic->dropTable(NDBT_Tables::getTable(num)->getName());
+ tabList[num] = false;
+ }
+ tabCount = 0;
+ }
+
+ NdbRestarter restarter;
+ int result = NDBT_OK;
+ int bias = 1; // 0-less 1-more
+ int i = 0;
+
+ while (i < loops) {
+ g_info << "loop " << i << " tabs " << tabCount << "/" << numTables << endl;
+ int num = myRandom48(numTables);
+ const NdbDictionary::Table* pTab = NDBT_Tables::getTable(num);
+ char tabName[200];
+ strcpy(tabName, pTab->getName());
+
+ if (tabList[num] == false) {
+ if (bias == 0 && myRandom48(100) < 80)
+ continue;
+ g_info << tabName << ": create" << endl;
+ if (pDic->createTable(*pTab) != 0) {
+ const NdbError err = pDic->getNdbError();
+ g_err << tabName << ": create failed: " << err << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ const NdbDictionary::Table* pTab2 = pDic->getTable(tabName);
+ if (pTab2 == NULL) {
+ const NdbError err = pDic->getNdbError();
+ g_err << tabName << ": verify create: " << err << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ tabList[num] = true;
+ assert(tabCount < numTables);
+ tabCount++;
+ if (tabCount == numTables)
+ bias = 0;
+ }
+ else {
+ if (bias == 1 && myRandom48(100) < 80)
+ continue;
+ g_info << tabName << ": drop" << endl;
+ if (restarter.insertErrorInAllNodes(4013) != 0) {
+ g_err << "error insert failed" << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ if (pDic->dropTable(tabName) != 0) {
+ const NdbError err = pDic->getNdbError();
+ g_err << tabName << ": drop failed: " << err << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ const NdbDictionary::Table* pTab2 = pDic->getTable(tabName);
+ if (pTab2 != NULL) {
+ g_err << tabName << ": verify drop: table exists" << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ if (pDic->getNdbError().code != 709 &&
+ pDic->getNdbError().code != 723) {
+ const NdbError err = pDic->getNdbError();
+ g_err << tabName << ": verify drop: " << err << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+ tabList[num] = false;
+ assert(tabCount > 0);
+ tabCount--;
+ if (tabCount == 0)
+ bias = 1;
+ }
+ i++;
+ }
+
+ delete [] tabList;
+ return result;
+}
+
+
int runCreateAndDropWithData(NDBT_Context* ctx, NDBT_Step* step){
Ndb* pNdb = GETNDB(step);
int loops = ctx->getNumLoops();
@@ -1945,6 +2040,12 @@ TESTCASE("CreateAndDrop",
"Try to create and drop the table loop number of times\n"){
INITIALIZER(runCreateAndDrop);
}
+TESTCASE("CreateAndDropAtRandom",
+ "Try to create and drop table at random loop number of times\n"
+ "Uses all available tables\n"
+ "Uses error insert 4013 to make TUP verify table descriptor"){
+ INITIALIZER(runCreateAndDropAtRandom);
+}
TESTCASE("CreateAndDropWithData",
"Try to create and drop the table when it's filled with data\n"
"do this loop number of times\n"){
diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp
index 5474837228a..ad1ea5ed6f2 100644
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp
@@ -294,6 +294,7 @@ int runRestarts(NDBT_Context* ctx, NDBT_Step* step){
}
i++;
}
+ ctx->stopTest();
return result;
}
diff --git a/storage/ndb/test/ndbapi/test_event_merge.cpp b/storage/ndb/test/ndbapi/test_event_merge.cpp
index 7a9a1986793..6936e860065 100644
--- a/storage/ndb/test/ndbapi/test_event_merge.cpp
+++ b/storage/ndb/test/ndbapi/test_event_merge.cpp
@@ -1535,11 +1535,19 @@ selecttables()
uint i;
for (i = 0; i < maxrun(); i++)
run(i).skip = false;
+ if (g_opts.opstring != 0) {
+ ll1("using all tables due to fixed ops");
+ return;
+ }
for (i = 0; i + 1 < maxrun(); i++)
run(urandom(maxrun())).skip = true;
uint cnt = 0;
- for (i = 0; i < maxrun(); i++)
- cnt += ! run(i).skip;
+ for (i = 0; i < maxrun(); i++) {
+ if (! run(i).skip) {
+ ll2("use table " << run(i).tabname);
+ cnt++;
+ }
+ }
ll1("use " << cnt << "/" << maxrun() << " tables in this loop");
}
diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index 8d893f11288..f58a6f050d5 100644
--- a/storage/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
@@ -497,6 +497,10 @@ args: -n CreateAndDrop
max-time: 1500
cmd: testDict
+args: -n CreateAndDropAtRandom -l 200 T1
+
+max-time: 1500
+cmd: testDict
args: -n CreateAndDropWithData
max-time: 1500
@@ -531,6 +535,10 @@ max-time: 1500
cmd: testDict
args: -n TemporaryTables T1 T6 T7 T8
+max-time: 1500
+cmd: testDict
+args: -n Restart_NR2 T1
+
#
# TEST NDBAPI
#
diff --git a/storage/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp
index 7c5ce68c950..49f188d12c0 100644
--- a/storage/ndb/tools/desc.cpp
+++ b/storage/ndb/tools/desc.cpp
@@ -293,7 +293,8 @@ void print_part_info(Ndb* pNdb, NDBT_Table* pTab)
{ "Partition", 0, NdbDictionary::Column::FRAGMENT },
{ "Row count", 0, NdbDictionary::Column::ROW_COUNT },
{ "Commit count", 0, NdbDictionary::Column::COMMIT_COUNT },
- { "Frag memory", 0, NdbDictionary::Column::FRAGMENT_MEMORY },
+ { "Frag fixed memory", 0, NdbDictionary::Column::FRAGMENT_FIXED_MEMORY },
+ { "Frag varsized memory", 0, NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY },
{ 0, 0, 0 }
};
diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp
index b190652232e..44f4b770b05 100644
--- a/storage/ndb/tools/restore/consumer_restore.cpp
+++ b/storage/ndb/tools/restore/consumer_restore.cpp
@@ -148,17 +148,38 @@ BackupRestore::finalize_table(const TableS & table){
bool ret= true;
if (!m_restore && !m_restore_meta)
return ret;
- if (table.have_auto_inc())
+ if (!table.have_auto_inc())
+ return ret;
+
+ Uint64 max_val= table.get_max_auto_val();
+ do
{
- Uint64 max_val= table.get_max_auto_val();
- Uint64 auto_val;
+ Uint64 auto_val = ~(Uint64)0;
int r= m_ndb->readAutoIncrementValue(get_table(table.m_dictTable), auto_val);
- if (r == -1 && m_ndb->getNdbError().code != 626)
+ if (r == -1 && m_ndb->getNdbError().status == NdbError::TemporaryError)
+ {
+ NdbSleep_MilliSleep(50);
+ continue; // retry
+ }
+ else if (r == -1 && m_ndb->getNdbError().code != 626)
+ {
ret= false;
- else if (r == -1 || max_val+1 > auto_val)
- ret= m_ndb->setAutoIncrementValue(get_table(table.m_dictTable), max_val+1, false) != -1;
- }
- return ret;
+ }
+ else if ((r == -1 && m_ndb->getNdbError().code == 626) ||
+ max_val+1 > auto_val || auto_val == ~(Uint64)0)
+ {
+ r= m_ndb->setAutoIncrementValue(get_table(table.m_dictTable),
+ max_val+1, false);
+ if (r == -1 &&
+ m_ndb->getNdbError().status == NdbError::TemporaryError)
+ {
+ NdbSleep_MilliSleep(50);
+ continue; // retry
+ }
+ ret = (r == 0);
+ }
+ return (ret);
+ } while (1);
}
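
Editor's note: the rewritten finalize_table() above retries the auto-increment read/write whenever NDB reports a temporary error, sleeping 50 ms between attempts. A minimal sketch of that retry pattern, with hypothetical stand-ins for readAutoIncrementValue()/setAutoIncrementValue() (the stubs below simply succeed so the sketch compiles):

    #include <chrono>
    #include <thread>

    enum class Status { Ok, NotFound, TemporaryError, PermanentError };

    // Hypothetical stand-ins for the NDB auto-increment API used above.
    static Status read_auto_inc(unsigned long long& value) { value = 0; return Status::Ok; }
    static Status set_auto_inc(unsigned long long)          { return Status::Ok; }

    bool finalize_auto_inc(unsigned long long max_val) {
      for (;;) {
        unsigned long long current = ~0ULL;
        Status st = read_auto_inc(current);
        if (st == Status::TemporaryError) {
          std::this_thread::sleep_for(std::chrono::milliseconds(50));
          continue;                              // retry the read
        }
        if (st == Status::PermanentError)
          return false;                          // corresponds to code != 626 above
        if (st == Status::NotFound || max_val + 1 > current || current == ~0ULL) {
          st = set_auto_inc(max_val + 1);
          if (st == Status::TemporaryError) {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
            continue;                            // retry the whole read/set cycle
          }
          return st == Status::Ok;
        }
        return true;                             // stored value already high enough
      }
    }
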
@@ -464,7 +485,8 @@ BackupRestore::object(Uint32 type, const void * ptr)
if (ret)
{
NdbError errobj= dict->getNdbError();
- info << "FAILED " << errobj << endl;
+ info << "FAILED" << endl;
+ err << "Create tablespace failed: " << old.getName() << ": " << errobj << endl;
return false;
}
info << "done" << endl;
@@ -502,7 +524,8 @@ BackupRestore::object(Uint32 type, const void * ptr)
if (ret)
{
NdbError errobj= dict->getNdbError();
- info << "FAILED" << errobj << endl;
+ info << "FAILED" << endl;
+ err << "Create logfile group failed: " << old.getName() << ": " << errobj << endl;
return false;
}
info << "done" << endl;
@@ -543,7 +566,9 @@ BackupRestore::object(Uint32 type, const void * ptr)
info << "Creating datafile \"" << old.getPath() << "\"..." << flush;
if (dict->createDatafile(old))
{
- info << "FAILED " << dict->getNdbError() << endl;
+ NdbError errobj= dict->getNdbError();
+ info << "FAILED" << endl;
+ err << "Create datafile failed: " << old.getPath() << ": " << errobj << endl;
return false;
}
info << "done" << endl;
@@ -567,7 +592,9 @@ BackupRestore::object(Uint32 type, const void * ptr)
info << "Creating undofile \"" << old.getPath() << "\"..." << flush;
if (dict->createUndofile(old))
{
- info << "FAILED " << dict->getNdbError() << endl;
+ NdbError errobj= dict->getNdbError();
+ info << "FAILED" << endl;
+ err << "Create undofile failed: " << old.getPath() << ": " << errobj << endl;
return false;
}
info << "done" << endl;
@@ -792,8 +819,6 @@ BackupRestore::table(const TableS & table){
}
info << "Successfully restored table event " << event_name << endl ;
}
-
- m_ndb->setAutoIncrementValue(tab, ~(Uint64)0, false);
}
const NdbDictionary::Table* null = 0;
m_new_tables.fill(table.m_dictTable->getTableId(), null);
diff --git a/storage/ndb/tools/waiter.cpp b/storage/ndb/tools/waiter.cpp
index cb02d5e7c36..e3d8733b0ed 100644
--- a/storage/ndb/tools/waiter.cpp
+++ b/storage/ndb/tools/waiter.cpp
@@ -92,7 +92,7 @@ int main(int argc, char** argv){
wait_status= NDB_MGM_NODE_STATUS_STARTED;
}
- if (waitClusterStatus(_hostName, wait_status, _timeout) != 0)
+ if (waitClusterStatus(_hostName, wait_status, _timeout*10) != 0)
return NDBT_ProgramExit(NDBT_FAILED);
return NDBT_ProgramExit(NDBT_OK);
}
@@ -311,7 +311,7 @@ waitClusterStatus(const char* _addr,
}
g_info << "Waiting for cluster enter state "
<< ndb_mgm_get_node_status_string(_status)<< endl;
- NdbSleep_SecSleep(1);
+ NdbSleep_MilliSleep(100);
attempts++;
}
return 0;
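
Editor's note: the waiter change above swaps the 1-second sleep for a 100 ms sleep while multiplying the attempt budget by ten, which presumably keeps the overall wall-clock timeout the same but notices the target cluster state sooner. A small illustrative sketch of that polling pattern (names are made up, not the waiter API):

    #include <chrono>
    #include <thread>

    // Poll a condition roughly 10 times per second until it holds or the
    // timeout (in seconds) expires; same time budget, finer granularity.
    template <class Pred>
    bool wait_until(Pred reached, int timeout_seconds) {
      const int max_attempts = timeout_seconds * 10;   // matches _timeout*10 above
      for (int attempts = 0; !reached(); ++attempts) {
        if (attempts >= max_attempts)
          return false;                                // timed out
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
      }
      return true;
    }
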
diff --git a/strings/decimal.c b/strings/decimal.c
index 7ed2d8f53df..736dc2e57c4 100644
--- a/strings/decimal.c
+++ b/strings/decimal.c
@@ -171,6 +171,7 @@ static const dec1 frac_max[DIG_PER_DEC1-1]={
do \
{ \
dec1 a=(from1)+(from2)+(carry); \
+ DBUG_ASSERT((carry) <= 1); \
if (((carry)= a >= DIG_BASE)) /* no division here! */ \
a-=DIG_BASE; \
(to)=a; \
@@ -179,7 +180,7 @@ static const dec1 frac_max[DIG_PER_DEC1-1]={
#define ADD2(to, from1, from2, carry) \
do \
{ \
- dec1 a=(from1)+(from2)+(carry); \
+ dec2 a=((dec2)(from1))+(from2)+(carry); \
if (((carry)= a >= DIG_BASE)) \
a-=DIG_BASE; \
if (unlikely(a >= DIG_BASE)) \
@@ -187,7 +188,7 @@ static const dec1 frac_max[DIG_PER_DEC1-1]={
a-=DIG_BASE; \
carry++; \
} \
- (to)=a; \
+ (to)=(dec1) a; \
} while(0)
#define SUB(to, from1, from2, carry) /* to=from1-from2 */ \
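
Editor's note: the change to ADD2 above widens the intermediate sum to dec2. In decimal_mul() the carry argument can itself hold the high word of a previous product, so from1 + from2 + carry can exceed what a 32-bit dec1 holds; ADD keeps the narrow sum but now asserts carry <= 1. A self-contained sketch of the widened addition (types and values assumed to mirror decimal.c's DIG_BASE of 10^9):

    #include <cstdint>
    #include <iostream>

    typedef int32_t dec1;
    typedef int64_t dec2;
    static const dec1 DIG_BASE = 1000000000;

    // Widened add of two decimal "digits" plus an arbitrary carry; the carry
    // out may be 0, 1 or 2, which is why the second reduction step exists.
    static dec1 add2_sketch(dec1 from1, dec1 from2, dec1& carry) {
      dec2 a = (dec2)from1 + from2 + carry;            // 64-bit: cannot overflow here
      carry = (a >= DIG_BASE);
      if (carry) a -= DIG_BASE;
      if (a >= DIG_BASE) { a -= DIG_BASE; carry++; }   // sum can approach 3*DIG_BASE
      return (dec1)a;
    }

    int main() {
      dec1 carry = 999999999;                          // e.g. the high word of a product
      dec1 digit = add2_sketch(999999999, 999999999, carry);
      std::cout << "digit=" << digit << " carry=" << carry << "\n";  // 999999997, 2
      return 0;
    }
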
@@ -2004,7 +2005,13 @@ int decimal_mul(decimal_t *from1, decimal_t *from2, decimal_t *to)
ADD2(*buf0, *buf0, lo, carry);
carry+=hi;
}
- for (; carry; buf0--)
+ if (carry)
+ {
+ if (buf0 < to->buf)
+ return E_DEC_OVERFLOW;
+ ADD2(*buf0, *buf0, 0, carry);
+ }
+ for (buf0--; carry; buf0--)
{
if (buf0 < to->buf)
return E_DEC_OVERFLOW;
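
Editor's note: the decimal_mul() fix above handles the leftover carry in two stages: the first propagation step uses ADD2 because that carry may be larger than 1, and only then does the original single-carry loop continue into the more significant digits. A hedged, simplified sketch of the same propagation written with division instead of the macros:

    #include <cstdint>
    #include <vector>

    typedef int32_t dec1;
    typedef int64_t dec2;
    static const dec1 DIG_BASE = 1000000000;

    // Fold a possibly multi-unit carry into digits[pos], digits[pos-1], ...
    // Returns false if the carry would run past the most significant digit,
    // the equivalent of returning E_DEC_OVERFLOW above.
    bool propagate_carry(std::vector<dec1>& digits, int pos, dec2 carry) {
      while (carry) {
        if (pos < 0)
          return false;                        // overflow of the result buffer
        dec2 a = (dec2)digits[pos] + carry;    // widened: carry may exceed 1
        digits[pos] = (dec1)(a % DIG_BASE);
        carry = a / DIG_BASE;
        --pos;
      }
      return true;
    }
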
diff --git a/strings/strtod.c b/strings/strtod.c
index 61f2c107abe..e0910205d2f 100644
--- a/strings/strtod.c
+++ b/strings/strtod.c
@@ -26,7 +26,8 @@
*/
-#include "my_base.h" /* Includes errno.h */
+#include "my_base.h" /* Defines EOVERFLOW on Windows */
+#include "my_global.h" /* Includes errno.h */
#include "m_ctype.h"
#define MAX_DBL_EXP 308
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index 72e230da0c2..5d7c0ed154f 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -28,6 +28,23 @@
%define see_base For a description of MySQL see the base MySQL RPM or http://www.mysql.com
+# On SuSE 9 no separate "debuginfo" package is built. To enable basic
+# debugging on that platform, we don't strip binaries on SuSE 9. We
+# disable the stripping of binaries by redefining the RPM macro
+# "__os_install_post", leaving out the script calls that normally do
+# this. We do this in all cases, because on platforms where "debuginfo" is
+# created, a script "find-debuginfo.sh" will be called that does the strip
+# anyway, as part of separating the executable and debug information into
+# separate files placed in separate packages.
+#
+# Some references (shows more advanced conditional usage):
+# http://www.redhat.com/archives/rpm-list/2001-November/msg00257.html
+# http://www.redhat.com/archives/rpm-list/2003-February/msg00275.html
+# http://www.redhat.com/archives/rhl-devel-list/2004-January/msg01546.html
+# http://lists.opensuse.org/archive/opensuse-commit/2006-May/1171.html
+
+%define __os_install_post /usr/lib/rpm/brp-compress
+
Name: MySQL
Summary: MySQL: a very fast and reliable SQL database server
Group: Applications/Databases
@@ -315,7 +332,6 @@ fi
(cd mysql-debug-%{mysql_version} ; \
./mysql-test-run.pl --comment=debug --skip-rpl --skip-ndbcluster --force ; \
true)
-
##############################################################################
#
# Build the max binary
@@ -327,7 +343,6 @@ CFLAGS="${MYSQL_BUILD_CFLAGS:-$RPM_OPT_FLAGS} -g" \
CXXFLAGS="${MYSQL_BUILD_CXXFLAGS:-$RPM_OPT_FLAGS -felide-constructors -fno-exceptions -fno-rtti} -g" \
BuildMySQL "--enable-shared \
--with-berkeley-db \
- --with-innodb \
--with-ndbcluster \
--with-archive-storage-engine \
--with-csv-storage-engine \
@@ -367,7 +382,6 @@ BuildMySQL "--enable-shared \
--with-embedded-server \
--with-big-tables \
--with-comment=\"MySQL Community Server (GPL)\"")
-
# We might want to save the config log file
if test -n "$MYSQL_CONFLOG_DEST"
then
@@ -478,17 +492,7 @@ chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
# Initiate databases if needed
%{_bindir}/mysql_install_db --rpm --user=%{mysqld_user}
-# Upgrade databases if needed
-# This must be done as database user "root", who should be password-protected,
-# but this password is not available here.
-# So ensure the server is isolated as much as possible, and start it so that
-# passwords are not checked.
-# See the related change in the start script "/etc/init.d/mysql".
-chmod 700 $mysql_datadir
-%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables
-%{_bindir}/mysql_upgrade
-%{_sysconfdir}/init.d/mysql stop --skip-networking --skip-grant-tables
-chmod 755 $mysql_datadir
+# Upgrade databases if needed would go here - but it cannot be automated yet
# Change permissions again to fix any new files.
chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
@@ -584,6 +588,7 @@ fi
%attr(755, root, root) %{_bindir}/mysqlbug
%attr(755, root, root) %{_bindir}/mysqld_multi
%attr(755, root, root) %{_bindir}/mysqld_safe
+%attr(755, root, root) %{_bindir}/mysqldumpslow
%attr(755, root, root) %{_bindir}/mysqlhotcopy
%attr(755, root, root) %{_bindir}/mysqltest
%attr(755, root, root) %{_bindir}/perror
@@ -613,7 +618,6 @@ fi
%attr(755, root, root) %{_bindir}/mysqlbinlog
%attr(755, root, root) %{_bindir}/mysqlcheck
%attr(755, root, root) %{_bindir}/mysqldump
-%attr(755, root, root) %{_bindir}/mysqldumpslow
%attr(755, root, root) %{_bindir}/mysqlimport
%attr(755, root, root) %{_bindir}/mysqlshow
%attr(755, root, root) %{_bindir}/mysqlslap
@@ -701,6 +705,29 @@ fi
# itself - note that they must be ordered by date (important when
# merging BK trees)
%changelog
+* Mon Jul 10 2006 Joerg Bruehe <joerg@mysql.com>
+
+- Fix a typing error in the "make" target for the Perl script to run the tests.
+
+* Tue Jul 04 2006 Joerg Bruehe <joerg@mysql.com>
+
+- Use the Perl script to run the tests, because it will automatically check
+ whether the server is configured with SSL.
+
+* Tue Jun 27 2006 Joerg Bruehe <joerg@mysql.com>
+
+- move "mysqldumpslow" from the client RPM to the server RPM (bug#20216)
+
+- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade,
+ there are some more aspects which need to be solved before this is possible.
+ For now, just ensure the binary "mysql_upgrade" is delivered and installed.
+
+* Thu Jun 22 2006 Joerg Bruehe <joerg@mysql.com>
+
+- Close a gap of the previous version by explicitly using
+ a newly created temporary directory for the socket to be used
+ in the "mysql_upgrade" operation, overriding any local setting.
+
* Tue Jun 20 2006 Joerg Bruehe <joerg@mysql.com>
- To run "mysql_upgrade", we need a running server;
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 8ee64f8179c..fd7bc5a532e 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -50,8 +50,14 @@ INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include \
LIBS = @CLIENT_LIBS@
LDADD = @CLIENT_EXTRA_LDFLAGS@ \
$(LIBMYSQLCLIENT_LA)
+if HAVE_NETWARE
+mysql_client_test_LDADD= $(LDADD) $(CXXLDFLAGS)
+mysql_client_test_SOURCES= mysql_client_test.c $(yassl_dummy_link_fix) \
+ ../mysys/my_memmem.c
+else
mysql_client_test_LDADD= $(LDADD) $(CXXLDFLAGS) -L../mysys -lmysys
mysql_client_test_SOURCES= mysql_client_test.c $(yassl_dummy_link_fix)
+endif
insert_test_SOURCES= insert_test.c $(yassl_dummy_link_fix)
select_test_SOURCES= select_test.c $(yassl_dummy_link_fix)
insert_test_DEPENDENCIES= $(LIBRARIES) $(pkglib_LTLIBRARIES)
diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c
index e3159cfa5e5..cbf8a1cbf01 100644
--- a/tests/mysql_client_test.c
+++ b/tests/mysql_client_test.c
@@ -58,6 +58,7 @@ static unsigned int iter_count= 0;
static my_bool have_innodb= FALSE;
static const char *opt_basedir= "./";
+static const char *opt_vardir= "mysql-test/var";
static longlong opt_getopt_ll_test= 0;
@@ -8311,6 +8312,39 @@ static void test_list_fields()
}
+static void test_bug19671()
+{
+ MYSQL_RES *result;
+ int rc;
+ myheader("test_bug19671");
+
+ rc= mysql_query(mysql, "drop table if exists t1");
+ myquery(rc);
+
+ rc= mysql_query(mysql, "drop view if exists v1");
+ myquery(rc);
+
+ rc= mysql_query(mysql, "create table t1(f1 int)");
+ myquery(rc);
+
+ rc= mysql_query(mysql, "create view v1 as select va.* from t1 va");
+ myquery(rc);
+
+ result= mysql_list_fields(mysql, "v1", NULL);
+ mytest(result);
+
+ rc= my_process_result_set(result);
+ DIE_UNLESS(rc == 0);
+
+ verify_prepare_field(result, 0, "f1", "f1", MYSQL_TYPE_LONG,
+ "v1", "v1", current_db, 11, "0");
+
+ mysql_free_result(result);
+ myquery(mysql_query(mysql, "drop view v1"));
+ myquery(mysql_query(mysql, "drop table t1"));
+}
+
+
/* Test a memory overrun bug */
static void test_mem_overun()
@@ -13066,7 +13100,7 @@ static void test_bug9478()
int4store(buff, stmt->stmt_id);
buff[4]= 1; /* prefetch rows */
rc= ((*mysql->methods->advanced_command)(mysql, COM_STMT_FETCH, buff,
- sizeof(buff), 0,0,1) ||
+ sizeof(buff), 0,0,1,NULL) ||
(*mysql->methods->read_query_result)(mysql));
DIE_UNLESS(rc);
if (!opt_silent && i == 0)
@@ -14868,6 +14902,7 @@ static void test_bug17667()
struct buffer_and_length *statement_cursor;
FILE *log_file;
+ char *master_log_filename;
myheader("test_bug17667");
@@ -14879,7 +14914,13 @@ static void test_bug17667()
}
sleep(1); /* The server may need time to flush the data to the log. */
- log_file= fopen("var/log/master.log", "r");
+
+ master_log_filename = (char *) malloc(strlen(opt_vardir) + strlen("/log/master.log") + 1);
+ strcpy(master_log_filename, opt_vardir);
+ strcat(master_log_filename, "/log/master.log");
+ log_file= fopen(master_log_filename, "r");
+ free(master_log_filename);
+
if (log_file != NULL) {
for (statement_cursor= statements; statement_cursor->buffer != NULL;
@@ -14900,11 +14941,13 @@ static void test_bug17667()
printf("success. All queries found intact in the log.\n");
- } else {
- fprintf(stderr, "Could not find the log file, var/log/master.log, so "
- "test_bug17667 is \ninconclusive. Run test from the "
- "mysql-test/mysql-test-run* program \nto set up the correct "
- "environment for this test.\n\n");
+ }
+ else
+ {
+ fprintf(stderr, "Could not find the log file, VARDIR/log/master.log, so "
+ "test_bug17667 is \ninconclusive. Run test from the "
+ "mysql-test/mysql-test-run* program \nto set up the correct "
+ "environment for this test.\n\n");
}
if (log_file != NULL)
@@ -14914,7 +14957,8 @@ static void test_bug17667()
/*
- Bug#14169: type of group_concat() result changed to blob if tmp_table was used
+ Bug#14169: type of group_concat() result changed to blob if tmp_table was
+ used
*/
static void test_bug14169()
{
@@ -14947,7 +14991,243 @@ static void test_bug14169()
rc= mysql_query(mysql, "drop table t1");
myquery(rc);
-}/*
+}
+
+/*
+ Test that mysql_insert_id() behaves as documented in our manual
+*/
+
+static void test_mysql_insert_id()
+{
+ my_ulonglong res;
+ int rc;
+
+ myheader("test_mysql_insert_id");
+
+ rc= mysql_query(mysql, "drop table if exists t1");
+ myquery(rc);
+ /* table without auto_increment column */
+ rc= mysql_query(mysql, "create table t1 (f1 int, f2 varchar(255), key(f1))");
+ myquery(rc);
+ rc= mysql_query(mysql, "insert into t1 values (1,'a')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ rc= mysql_query(mysql, "insert into t1 values (null,'b')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ rc= mysql_query(mysql, "insert into t1 select 5,'c'");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ rc= mysql_query(mysql, "insert into t1 select null,'d'");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ rc= mysql_query(mysql, "insert into t1 values (null,last_insert_id(300))");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 300);
+ rc= mysql_query(mysql, "insert into t1 select null,last_insert_id(400)");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ /*
+ Behaviour change: old code used to return 0; but 400 is consistent
+ with INSERT VALUES, and the manual's section on mysql_insert_id() does not
+ say INSERT SELECT should be different.
+ */
+ DIE_UNLESS(res == 400);
+
+ /* table with auto_increment column */
+ rc= mysql_query(mysql, "create table t2 (f1 int not null primary key auto_increment, f2 varchar(255))");
+ myquery(rc);
+ rc= mysql_query(mysql, "insert into t2 values (1,'a')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 1);
+ /* this should not influence next INSERT if it doesn't have auto_inc */
+ rc= mysql_query(mysql, "insert into t1 values (10,'e')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+
+ rc= mysql_query(mysql, "insert into t2 values (null,'b')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 2);
+ rc= mysql_query(mysql, "insert into t2 select 5,'c'");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ /*
+ Manual says that for multirow insert this should have been 5, but does not
+ say for INSERT SELECT. This is a behaviour change: old code used to return
+ 0. We try to be consistent with INSERT VALUES.
+ */
+ DIE_UNLESS(res == 5);
+ rc= mysql_query(mysql, "insert into t2 select null,'d'");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 6);
+ /* with more than one row */
+ rc= mysql_query(mysql, "insert into t2 values (10,'a'),(11,'b')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 11);
+ rc= mysql_query(mysql, "insert into t2 select 12,'a' union select 13,'b'");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ /*
+ Manual says that for multirow insert this should have been 13, but does
+ not say for INSERT SELECT. This is a behaviour change: old code used to
+ return 0. We try to be consistent with INSERT VALUES.
+ */
+ DIE_UNLESS(res == 13);
+ rc= mysql_query(mysql, "insert into t2 values (null,'a'),(null,'b')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 14);
+ rc= mysql_query(mysql, "insert into t2 select null,'a' union select null,'b'");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 16);
+ rc= mysql_query(mysql, "insert into t2 select 12,'a' union select 13,'b'");
+ myquery_r(rc);
+ rc= mysql_query(mysql, "insert ignore into t2 select 12,'a' union select 13,'b'");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ rc= mysql_query(mysql, "insert into t2 values (12,'a'),(13,'b')");
+ myquery_r(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ rc= mysql_query(mysql, "insert ignore into t2 values (12,'a'),(13,'b')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ /* mixing autogenerated and explicit values */
+ rc= mysql_query(mysql, "insert into t2 values (null,'e'),(12,'a'),(13,'b')");
+ myquery_r(rc);
+ rc= mysql_query(mysql, "insert into t2 values (null,'e'),(12,'a'),(13,'b'),(25,'g')");
+ myquery_r(rc);
+ rc= mysql_query(mysql, "insert into t2 values (null,last_insert_id(300))");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ /*
+ according to the manual, this might be 20 or 300, but it looks like
+ auto_increment column takes priority over last_insert_id().
+ */
+ DIE_UNLESS(res == 20);
+ /* If first autogenerated number fails and 2nd works: */
+ rc= mysql_query(mysql, "drop table t2");
+ myquery(rc);
+ rc= mysql_query(mysql, "create table t2 (f1 int not null primary key "
+ "auto_increment, f2 varchar(255), unique (f2))");
+ myquery(rc);
+ rc= mysql_query(mysql, "insert into t2 values (null,'e')");
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 1);
+ rc= mysql_query(mysql, "insert ignore into t2 values (null,'e'),(null,'a'),(null,'e')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 2);
+ /* If autogenerated fails and explicit works: */
+ rc= mysql_query(mysql, "insert ignore into t2 values (null,'e'),(12,'c'),(null,'d')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ /*
+ Behaviour change: old code returned 3 (first autogenerated, even if it
+ fails); we now return first successful autogenerated.
+ */
+ DIE_UNLESS(res == 13);
+ /* UPDATE may update mysql_insert_id() if it uses LAST_INSERT_ID(#) */
+ rc= mysql_query(mysql, "update t2 set f1=14 where f1=12");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ rc= mysql_query(mysql, "update t2 set f1=NULL where f1=14");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ rc= mysql_query(mysql, "update t2 set f2=last_insert_id(372) where f1=0");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 372);
+ /* check that LAST_INSERT_ID() does not update mysql_insert_id(): */
+ rc= mysql_query(mysql, "insert into t2 values (null,'g')");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 15);
+ rc= mysql_query(mysql, "update t2 set f2=(@li:=last_insert_id()) where f1=15");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 0);
+ /*
+ Behaviour change: now if ON DUPLICATE KEY UPDATE updates a row,
+ mysql_insert_id() returns the id of the row, instead of not being
+ affected.
+ */
+ rc= mysql_query(mysql, "insert into t2 values (null,@li) on duplicate key "
+ "update f2=concat('we updated ',f2)");
+ myquery(rc);
+ res= mysql_insert_id(mysql);
+ DIE_UNLESS(res == 15);
+
+ rc= mysql_query(mysql, "drop table t1,t2");
+ myquery(rc);
+}
+
+/*
+ Bug#20152: mysql_stmt_execute() writes to MYSQL_TYPE_DATE buffer
+*/
+
+static void test_bug20152()
+{
+ MYSQL_BIND bind[1];
+ MYSQL_STMT *stmt;
+ MYSQL_TIME tm;
+ int rc;
+ const char *query= "INSERT INTO t1 (f1) VALUES (?)";
+
+ myheader("test_bug20152");
+
+ memset(bind, 0, sizeof(bind));
+ bind[0].buffer_type= MYSQL_TYPE_DATE;
+ bind[0].buffer= (void*)&tm;
+
+ tm.year = 2006;
+ tm.month = 6;
+ tm.day = 18;
+ tm.hour = 14;
+ tm.minute = 9;
+ tm.second = 42;
+
+ rc= mysql_query(mysql, "DROP TABLE IF EXISTS t1");
+ myquery(rc);
+ rc= mysql_query(mysql, "CREATE TABLE t1 (f1 DATE)");
+ myquery(rc);
+
+ stmt= mysql_stmt_init(mysql);
+ rc= mysql_stmt_prepare(stmt, query, strlen(query));
+ check_execute(stmt, rc);
+ rc= mysql_stmt_bind_param(stmt, bind);
+ check_execute(stmt, rc);
+ rc= mysql_stmt_execute(stmt);
+ check_execute(stmt, rc);
+ rc= mysql_stmt_close(stmt);
+ check_execute(stmt, rc);
+ rc= mysql_query(mysql, "DROP TABLE t1");
+ myquery(rc);
+
+ if (tm.hour == 14 && tm.minute == 9 && tm.second == 42) {
+ if (!opt_silent)
+ printf("OK!");
+ } else {
+ printf("[14:09:42] != [%02d:%02d:%02d]\n", tm.hour, tm.minute, tm.second);
+ DIE_UNLESS(0==1);
+ }
+}
+/*
Read and parse arguments and MySQL options from my.cnf
*/
@@ -14989,6 +15269,8 @@ static struct my_option client_test_long_options[] =
{"user", 'u', "User for login if not current user", (char **) &opt_user,
(char **) &opt_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
+ {"vardir", 'v', "Data dir for tests.", (gptr*) &opt_vardir,
+ (gptr*) &opt_vardir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"getopt-ll-test", 'g', "Option for testing bug in getopt library",
(char **) &opt_getopt_ll_test, (char **) &opt_getopt_ll_test, 0,
GET_LL, REQUIRED_ARG, 0, 0, LONGLONG_MAX, 0, 0, 0},
@@ -15212,8 +15494,11 @@ static struct my_tests_st my_tests[]= {
{ "test_bug16143", test_bug16143 },
{ "test_bug16144", test_bug16144 },
{ "test_bug15613", test_bug15613 },
+ { "test_bug20152", test_bug20152 },
{ "test_bug14169", test_bug14169 },
{ "test_bug17667", test_bug17667 },
+ { "test_mysql_insert_id", test_mysql_insert_id },
+ { "test_bug19671", test_bug19671},
{ 0, 0 }
};
diff --git a/unittest/mysys/my_atomic-t.c b/unittest/mysys/my_atomic-t.c
index 71408ce957f..4e2e496c3b1 100644
--- a/unittest/mysys/my_atomic-t.c
+++ b/unittest/mysys/my_atomic-t.c
@@ -166,9 +166,9 @@ int main()
pthread_cond_init(&cond, 0);
my_atomic_rwlock_init(&rwl);
- test_atomic("my_atomic_add32", test_atomic_add_handler, 100,1000000);
- test_atomic("my_atomic_swap32", test_atomic_swap_handler, 100,1000000);
- test_atomic("my_atomic_cas32", test_atomic_cas_handler, 100,1000000);
+ test_atomic("my_atomic_add32", test_atomic_add_handler, 100,10000);
+ test_atomic("my_atomic_swap32", test_atomic_swap_handler, 100,10000);
+ test_atomic("my_atomic_cas32", test_atomic_cas_handler, 100,10000);
pthread_mutex_destroy(&mutex);
pthread_cond_destroy(&cond);