-rw-r--r--  CMakeLists.txt | 41
-rw-r--r--  VERSION | 2
-rw-r--r--  client/mysqlbinlog.cc | 5
-rw-r--r--  client/mysqltest.cc | 2
-rw-r--r--  cmake/build_configurations/mysql_release.cmake | 19
-rw-r--r--  cmake/compile_flags.cmake | 44
-rw-r--r--  cmake/cpack_rpm.cmake | 1
-rw-r--r--  cmake/libutils.cmake | 2
-rw-r--r--  cmake/maintainer.cmake | 93
-rw-r--r--  cmake/os/Darwin.cmake | 21
-rw-r--r--  cmake/os/WindowsCache.cmake | 6
-rw-r--r--  configure.cmake | 10
-rw-r--r--  extra/CMakeLists.txt | 22
-rw-r--r--  extra/innochecksum.c | 325
-rw-r--r--  extra/innochecksum.cc | 470
-rw-r--r--  extra/replace.c | 4
-rw-r--r--  extra/yassl/README | 9
-rw-r--r--  extra/yassl/include/openssl/ssl.h | 2
-rw-r--r--  extra/yassl/src/yassl_imp.cpp | 6
-rw-r--r--  extra/yassl/taocrypt/src/rsa.cpp | 2
-rw-r--r--  include/my_check_opt.h | 8
-rw-r--r--  include/my_pthread.h | 21
-rw-r--r--  include/mysql.h | 1
-rw-r--r--  include/mysql.h.pp | 1
-rw-r--r--  libmysql/CMakeLists.txt | 3
-rw-r--r--  libmysql/libmysql.c | 3
-rw-r--r--  libmysqld/lib_sql.cc | 3
-rw-r--r--  mysql-test/include/ctype_like_cond_propagation.inc | 39
-rw-r--r--  mysql-test/include/ctype_like_cond_propagation_utf8_german.inc | 16
-rw-r--r--  mysql-test/include/not_embedded.inc | 7
-rw-r--r--  mysql-test/include/restart_mysqld.inc | 1
-rw-r--r--  mysql-test/lib/My/SafeProcess/Base.pm | 1
-rw-r--r--  mysql-test/lib/My/SafeProcess/safe_process.cc | 3
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 80
-rw-r--r--  mysql-test/r/change_user_notembedded.result | 1
-rw-r--r--  mysql-test/r/create_or_replace.result | 2
-rw-r--r--  mysql-test/r/ctype_binary.result | 95
-rw-r--r--  mysql-test/r/ctype_latin1.result | 192
-rw-r--r--  mysql-test/r/ctype_uca.result | 303
-rw-r--r--  mysql-test/r/ctype_ucs.result | 193
-rw-r--r--  mysql-test/r/ctype_utf8.result | 229
-rw-r--r--  mysql-test/r/frm_bad_row_type-7333.result | 14
-rw-r--r--  mysql-test/r/func_time.result | 24
-rw-r--r--  mysql-test/r/group_by.result | 14
-rw-r--r--  mysql-test/r/group_by_innodb.result | 23
-rw-r--r--  mysql-test/r/insert_update_autoinc-7150.result | 9
-rw-r--r--  mysql-test/r/key_cache.result | 64
-rw-r--r--  mysql-test/r/kill-2.result | 10
-rw-r--r--  mysql-test/r/kill_processlist-6619.result | 6
-rw-r--r--  mysql-test/r/kill_query-6728.result | 7
-rw-r--r--  mysql-test/r/log_tables.result | 3
-rw-r--r--  mysql-test/r/mdev6830.result | 49
-rw-r--r--  mysql-test/r/not_embedded.require | 2
-rw-r--r--  mysql-test/r/processlist.result | 5
-rw-r--r--  mysql-test/r/select_found.result | 16
-rw-r--r--  mysql-test/r/show_check.result | 6
-rw-r--r--  mysql-test/r/sp-innodb.result | 32
-rw-r--r--  mysql-test/r/statistics_index_crash-7362.result | 33
-rw-r--r--  mysql-test/r/type_timestamp.result | 46
-rw-r--r--  mysql-test/r/view.result | 2
-rw-r--r--  mysql-test/r/windows.result | 5
-rw-r--r--  mysql-test/std_data/bad_row_type.MYD | 0
-rw-r--r--  mysql-test/std_data/bad_row_type.MYI | bin 0 -> 1024 bytes
-rw-r--r--  mysql-test/std_data/bad_row_type.frm | bin 0 -> 1760 bytes
-rw-r--r--  mysql-test/suite.pm | 2
-rw-r--r--  mysql-test/suite/binlog/r/binlog_checkpoint.result | 12
-rw-r--r--  mysql-test/suite/binlog/r/binlog_switch_inside_trans.result | 8
-rw-r--r--  mysql-test/suite/binlog/t/binlog_checkpoint.test | 31
-rw-r--r--  mysql-test/suite/binlog/t/binlog_grant.test | 2
-rw-r--r--  mysql-test/suite/binlog/t/binlog_switch_inside_trans.test | 6
-rw-r--r--  mysql-test/suite/engines/funcs/combinations | 11
-rw-r--r--  mysql-test/suite/engines/funcs/r/db_create_drop.result | 8
-rw-r--r--  mysql-test/suite/engines/funcs/r/db_create_error.result | 2
-rw-r--r--  mysql-test/suite/engines/funcs/r/db_create_if_not_exists.result | 4
-rw-r--r--  mysql-test/suite/engines/funcs/r/db_drop_error.result | 2
-rw-r--r--  mysql-test/suite/engines/funcs/r/db_use_error.result | 2
-rw-r--r--  mysql-test/suite/engines/funcs/r/sf_alter.result | 540
-rw-r--r--  mysql-test/suite/engines/funcs/r/sf_cursor.result | 9
-rw-r--r--  mysql-test/suite/engines/funcs/r/sp_alter.result | 120
-rw-r--r--  mysql-test/suite/engines/funcs/r/sp_cursor.result | 2
-rw-r--r--  mysql-test/suite/engines/funcs/r/sq_error.result | 8
-rw-r--r--  mysql-test/suite/engines/funcs/r/ta_rename.result | 64
-rw-r--r--  mysql-test/suite/engines/funcs/r/tc_rename_error.result | 2
-rw-r--r--  mysql-test/suite/engines/funcs/t/db_create_drop.test | 4
-rw-r--r--  mysql-test/suite/engines/funcs/t/db_create_error.test | 2
-rw-r--r--  mysql-test/suite/engines/funcs/t/db_create_if_not_exists.test | 2
-rw-r--r--  mysql-test/suite/engines/funcs/t/db_drop_error.test | 1
-rw-r--r--  mysql-test/suite/engines/funcs/t/db_use_error.test | 1
-rw-r--r--  mysql-test/suite/engines/funcs/t/rpl_row_until.test | 12
-rw-r--r--  mysql-test/suite/engines/funcs/t/tc_rename_error.test | 2
-rw-r--r--  mysql-test/suite/engines/iuds/combinations | 8
-rw-r--r--  mysql-test/suite/engines/iuds/r/strings_charsets_update_delete.result | bin 112951 -> 113116 bytes
-rw-r--r--  mysql-test/suite/engines/iuds/r/strings_update_delete.result | 28
-rw-r--r--  mysql-test/suite/engines/iuds/r/type_bit_iuds.result | 128
-rw-r--r--  mysql-test/suite/engines/iuds/r/update_delete_number.result | 14
-rw-r--r--  mysql-test/suite/innodb/r/group_commit_crash.result | 1
-rw-r--r--  mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result | 1
-rw-r--r--  mysql-test/suite/innodb/r/innochecksum.result | 31
-rw-r--r--  mysql-test/suite/innodb/r/innodb-mdev7046.result | 1
-rw-r--r--  mysql-test/suite/innodb/r/innodb-stats-sample.result | 4
-rw-r--r--  mysql-test/suite/innodb/r/innodb-wl5522-debug-zip.result | 6
-rw-r--r--  mysql-test/suite/innodb/r/innodb_bug12400341.result | 2
-rw-r--r--  mysql-test/suite/innodb/r/innodb_corrupt_bit.result | 1
-rw-r--r--  mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result | 12
-rw-r--r--  mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result | 1
-rw-r--r--  mysql-test/suite/innodb/r/insert_debug.result | 11
-rw-r--r--  mysql-test/suite/innodb/r/multi_repair-7404.result | 21
-rw-r--r--  mysql-test/suite/innodb/r/sp_temp_table.result | 253
-rw-r--r--  mysql-test/suite/innodb/r/strict_mode.result | 242
-rw-r--r--  mysql-test/suite/innodb/t/group_commit_crash.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/innochecksum.opt | 2
-rw-r--r--  mysql-test/suite/innodb/t/innochecksum.test | 70
-rw-r--r--  mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test | 3
-rw-r--r--  mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/innodb-mdev7046.test | 48
-rw-r--r--  mysql-test/suite/innodb/t/innodb-stats-sample.test | 78
-rw-r--r--  mysql-test/suite/innodb/t/innodb-wl5522-debug-zip.test | 11
-rw-r--r--  mysql-test/suite/innodb/t/innodb_bug12400341.test | 7
-rw-r--r--  mysql-test/suite/innodb/t/innodb_bug14147491.test | 3
-rw-r--r--  mysql-test/suite/innodb/t/innodb_corrupt_bit.test | 17
-rw-r--r--  mysql-test/suite/innodb/t/innodb_stats_create_on_corrupted.test | 12
-rw-r--r--  mysql-test/suite/innodb/t/innodb_stats_fetch_nonexistent.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/insert_debug.test | 22
-rw-r--r--  mysql-test/suite/innodb/t/multi_repair-7404.test | 18
-rw-r--r--  mysql-test/suite/innodb/t/sp_temp_table.test | 108
-rw-r--r--  mysql-test/suite/innodb/t/strict_mode.test | 251
-rw-r--r--  mysql-test/suite/maria/insert_select-7314.result | 17
-rw-r--r--  mysql-test/suite/maria/insert_select-7314.test | 27
-rw-r--r--  mysql-test/suite/maria/insert_select.result | 6
-rw-r--r--  mysql-test/suite/maria/insert_select.test | 21
-rw-r--r--  mysql-test/suite/multi_source/gtid.result | 55
-rw-r--r--  mysql-test/suite/multi_source/gtid.test | 11
-rw-r--r--  mysql-test/suite/perfschema/r/unary_digest.result | 47
-rw-r--r--  mysql-test/suite/perfschema/t/setup_instruments_defaults.test | 2
-rw-r--r--  mysql-test/suite/perfschema/t/unary_digest.test | 98
-rw-r--r--  mysql-test/suite/plugins/t/server_audit.test | 1
-rw-r--r--  mysql-test/suite/rpl/r/myisam_external_lock.result | 12
-rw-r--r--  mysql-test/suite/rpl/r/rpl_gtid_basic.result | 4
-rw-r--r--  mysql-test/suite/rpl/r/rpl_gtid_crash.result | 62
-rw-r--r--  mysql-test/suite/rpl/r/rpl_mdev6386.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_parallel.result | 164
-rw-r--r--  mysql-test/suite/rpl/r/rpl_parallel2.result | 1
-rw-r--r--  mysql-test/suite/rpl/t/myisam_external_lock-slave.opt | 2
-rw-r--r--  mysql-test/suite/rpl/t/myisam_external_lock.test | 24
-rw-r--r--  mysql-test/suite/rpl/t/rpl_gtid_basic.test | 4
-rw-r--r--  mysql-test/suite/rpl/t/rpl_gtid_crash-slave.opt | 1
-rw-r--r--  mysql-test/suite/rpl/t/rpl_gtid_crash.test | 107
-rw-r--r--  mysql-test/suite/rpl/t/rpl_mdev6386.test | 4
-rw-r--r--  mysql-test/suite/rpl/t/rpl_parallel.test | 309
-rw-r--r--  mysql-test/suite/rpl/t/rpl_parallel2.test | 7
-rw-r--r--  mysql-test/suite/sys_vars/r/sql_log_bin_basic.result | 7
-rw-r--r--  mysql-test/suite/sys_vars/r/stored_program_cache_basic.result | 6
-rw-r--r--  mysql-test/suite/sys_vars/r/stored_program_cache_func.result | 52
-rw-r--r--  mysql-test/suite/sys_vars/t/sql_log_bin_basic.test | 6
-rw-r--r--  mysql-test/suite/sys_vars/t/stored_program_cache_func.test | 43
-rw-r--r--  mysql-test/t/change_user_notembedded.test | 2
-rw-r--r--  mysql-test/t/create_or_replace.test | 10
-rw-r--r--  mysql-test/t/ctype_binary.test | 12
-rw-r--r--  mysql-test/t/ctype_latin1.test | 7
-rw-r--r--  mysql-test/t/ctype_uca.test | 27
-rw-r--r--  mysql-test/t/ctype_ucs.test | 6
-rw-r--r--  mysql-test/t/ctype_utf8.test | 7
-rw-r--r--  mysql-test/t/frm_bad_row_type-7333.test | 14
-rw-r--r--  mysql-test/t/func_time.test | 18
-rw-r--r--  mysql-test/t/group_by.test | 15
-rw-r--r--  mysql-test/t/group_by_innodb.test | 19
-rw-r--r--  mysql-test/t/insert_update_autoinc-7150.test | 8
-rw-r--r--  mysql-test/t/key_cache.test | 12
-rw-r--r--  mysql-test/t/kill-2-master.opt | 1
-rw-r--r--  mysql-test/t/kill-2.test | 29
-rw-r--r--  mysql-test/t/kill_processlist-6619.test | 12
-rw-r--r--  mysql-test/t/kill_query-6728.test | 14
-rw-r--r--  mysql-test/t/lock_sync.test | 1
-rw-r--r--  mysql-test/t/log_tables.test | 3
-rw-r--r--  mysql-test/t/mdev6830-master.opt | 1
-rw-r--r--  mysql-test/t/mdev6830.test | 63
-rw-r--r--  mysql-test/t/partition_innodb.test | 4
-rw-r--r--  mysql-test/t/partition_innodb_plugin.test | 6
-rw-r--r--  mysql-test/t/processlist.test | 18
-rw-r--r--  mysql-test/t/select_found.test | 20
-rw-r--r--  mysql-test/t/show_check.test | 25
-rw-r--r--  mysql-test/t/sp-innodb.test | 45
-rw-r--r--  mysql-test/t/sp_notembedded.test | 3
-rw-r--r--  mysql-test/t/statistics_index_crash-7362.test | 30
-rw-r--r--  mysql-test/t/type_timestamp.test | 47
-rw-r--r--  mysql-test/t/view.test | 10
-rw-r--r--  mysql-test/t/windows.test | 16
-rw-r--r--  mysys/mf_keycache.c | 59
-rw-r--r--  mysys/my_context.c | 13
-rw-r--r--  mysys/my_wincond.c | 24
-rw-r--r--  mysys/thr_lock.c | 19
-rw-r--r--  packaging/rpm-oel/mysql-systemd-start | 22
-rw-r--r--  packaging/rpm-oel/mysql.init | 8
-rw-r--r--  scripts/CMakeLists.txt | 7
-rw-r--r--  scripts/mysql_setpermission.sh | 6
-rw-r--r--  scripts/mysql_system_tables.sql | 2
-rw-r--r--  scripts/mysqld_safe.sh | 1
-rwxr-xr-x  sql-bench/test-table-elimination.sh | 2
-rw-r--r--  sql-common/client.c | 2
-rw-r--r--  sql/debug_sync.cc | 8
-rw-r--r--  sql/field.cc | 2
-rw-r--r--  sql/field.h | 23
-rw-r--r--  sql/filesort.cc | 13
-rw-r--r--  sql/handler.cc | 4
-rw-r--r--  sql/handler.h | 11
-rw-r--r--  sql/item.cc | 18
-rw-r--r--  sql/item.h | 2
-rw-r--r--  sql/item_cmpfunc.cc | 51
-rw-r--r--  sql/item_cmpfunc.h | 71
-rw-r--r--  sql/item_func.cc | 17
-rw-r--r--  sql/item_func.h | 7
-rw-r--r--  sql/item_timefunc.cc | 7
-rw-r--r--  sql/lock.cc | 9
-rw-r--r--  sql/log.cc | 7
-rw-r--r--  sql/log.h | 2
-rw-r--r--  sql/log_event.cc | 16
-rw-r--r--  sql/log_event.h | 4
-rw-r--r--  sql/mdl.cc | 1
-rw-r--r--  sql/mdl.h | 2
-rw-r--r--  sql/mysqld.cc | 5
-rw-r--r--  sql/mysqld.h | 2
-rw-r--r--  sql/opt_range.cc | 30
-rw-r--r--  sql/rpl_parallel.cc | 120
-rw-r--r--  sql/rpl_parallel.h | 22
-rw-r--r--  sql/rpl_rli.cc | 28
-rw-r--r--  sql/rpl_rli.h | 4
-rw-r--r--  sql/slave.cc | 2
-rw-r--r--  sql/sql_class.h | 8
-rw-r--r--  sql/sql_insert.cc | 2
-rw-r--r--  sql/sql_parse.cc | 124
-rw-r--r--  sql/sql_parse.h | 3
-rw-r--r--  sql/sql_plugin.cc | 2
-rw-r--r--  sql/sql_select.cc | 125
-rw-r--r--  sql/sql_select.h | 1
-rw-r--r--  sql/sql_show.cc | 14
-rw-r--r--  sql/sql_statistics.cc | 10
-rw-r--r--  sql/sql_table.cc | 34
-rw-r--r--  sql/sql_update.cc | 3
-rw-r--r--  sql/sql_view.cc | 6
-rw-r--r--  sql/sys_vars.cc | 27
-rw-r--r--  sql/table.cc | 32
-rw-r--r--  sql/table.h | 1
-rw-r--r--  storage/connect/CMakeLists.txt | 14
-rw-r--r--  storage/connect/connect.cc | 15
-rw-r--r--  storage/connect/engmsg.h | 5
-rw-r--r--  storage/connect/filamtxt.h | 1
-rw-r--r--  storage/connect/ha_connect.cc | 81
-rw-r--r--  storage/connect/ha_connect.h | 5
-rw-r--r--  storage/connect/json.cpp | 1055
-rw-r--r--  storage/connect/json.h | 246
-rw-r--r--  storage/connect/maputil.h | 4
-rw-r--r--  storage/connect/msgid.h | 5
-rw-r--r--  storage/connect/mycat.cc | 13
-rw-r--r--  storage/connect/myconn.cpp | 17
-rw-r--r--  storage/connect/myconn.h | 3
-rw-r--r--  storage/connect/mysql-test/connect/r/json.result | 439
-rw-r--r--  storage/connect/mysql-test/connect/std_data/biblio.jsn | 45
-rw-r--r--  storage/connect/mysql-test/connect/std_data/expense.jsn | 158
-rw-r--r--  storage/connect/mysql-test/connect/std_data/mulexp3.jsn | 52
-rw-r--r--  storage/connect/mysql-test/connect/std_data/mulexp4.jsn | 52
-rw-r--r--  storage/connect/mysql-test/connect/std_data/mulexp5.jsn | 52
-rw-r--r--  storage/connect/mysql-test/connect/t/json.test | 247
-rw-r--r--  storage/connect/odbccat.h | 10
-rw-r--r--  storage/connect/odbconn.cpp | 173
-rw-r--r--  storage/connect/odbconn.h | 12
-rw-r--r--  storage/connect/plgdbsem.h | 9
-rw-r--r--  storage/connect/rcmsg.c | 3
-rw-r--r--  storage/connect/reldef.cpp | 3
-rw-r--r--  storage/connect/reldef.h | 2
-rw-r--r--  storage/connect/tabdos.cpp | 3
-rw-r--r--  storage/connect/tabjson.cpp | 1322
-rw-r--r--  storage/connect/tabjson.h | 197
-rw-r--r--  storage/connect/table.cpp | 2
-rw-r--r--  storage/connect/tabmysql.cpp | 4
-rw-r--r--  storage/connect/tabodbc.cpp | 135
-rw-r--r--  storage/connect/tabodbc.h | 16
-rw-r--r--  storage/connect/value.cpp | 294
-rw-r--r--  storage/connect/value.h | 16
-rw-r--r--  storage/connect/xtable.h | 1
-rw-r--r--  storage/innobase/btr/btr0cur.cc | 121
-rw-r--r--  storage/innobase/buf/buf0buf.cc | 7
-rw-r--r--  storage/innobase/buf/buf0checksum.cc | 5
-rw-r--r--  storage/innobase/buf/buf0flu.cc | 34
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 29
-rw-r--r--  storage/innobase/fil/fil0fil.cc | 28
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 650
-rw-r--r--  storage/innobase/fts/fts0opt.cc | 18
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 53
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 9
-rw-r--r--  storage/innobase/ibuf/ibuf0ibuf.cc | 4
-rw-r--r--  storage/innobase/include/btr0cur.ic | 4
-rw-r--r--  storage/innobase/include/buf0buf.h | 3
-rw-r--r--  storage/innobase/include/buf0checksum.h | 9
-rw-r--r--  storage/innobase/include/dict0dict.h | 2
-rw-r--r--  storage/innobase/include/fil0fil.h | 12
-rw-r--r--  storage/innobase/include/fts0priv.ic | 26
-rw-r--r--  storage/innobase/include/mach0data.ic | 8
-rw-r--r--  storage/innobase/include/os0file.h | 1
-rw-r--r--  storage/innobase/include/page0page.h | 14
-rw-r--r--  storage/innobase/include/page0page.ic | 19
-rw-r--r--  storage/innobase/include/page0types.h | 2
-rw-r--r--  storage/innobase/include/page0zip.h | 18
-rw-r--r--  storage/innobase/include/rem0rec.h | 4
-rw-r--r--  storage/innobase/include/srv0srv.h | 1
-rw-r--r--  storage/innobase/include/trx0undo.h | 8
-rw-r--r--  storage/innobase/include/univ.i | 4
-rw-r--r--  storage/innobase/include/ut0ut.h | 5
-rw-r--r--  storage/innobase/lock/lock0lock.cc | 27
-rw-r--r--  storage/innobase/log/log0log.cc | 6
-rw-r--r--  storage/innobase/os/os0file.cc | 28
-rw-r--r--  storage/innobase/os/os0thread.cc | 9
-rw-r--r--  storage/innobase/page/page0zip.cc | 35
-rw-r--r--  storage/innobase/row/row0mysql.cc | 8
-rw-r--r--  storage/innobase/srv/srv0start.cc | 70
-rw-r--r--  storage/innobase/sync/sync0rw.cc | 1
-rw-r--r--  storage/maria/ha_maria.cc | 29
-rw-r--r--  storage/maria/ma_pagecache.c | 20
-rwxr-xr-x  storage/myisam/ftbench/ft-test-run.sh | 2
-rw-r--r--  storage/myisam/ha_myisam.cc | 31
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/alter_table_online.rdiff | 82
-rw-r--r--  storage/oqgraph/CMakeLists.txt | 2
-rw-r--r--  storage/perfschema/gen_pfs_lex_token.cc | 96
-rw-r--r--  storage/perfschema/pfs_digest.cc | 59
-rw-r--r--  storage/sphinx/mysql-test/sphinx/union-5539.result | 4
-rw-r--r--  storage/sphinx/mysql-test/sphinx/union-5539.test | 5
-rw-r--r--  storage/tokudb/CMakeLists.txt | 33
-rw-r--r--  storage/tokudb/README.md | 36
-rw-r--r--  storage/tokudb/doc2/sysbench.update.ma10.tokudb754.loglog.png | bin 0 -> 6947 bytes
-rw-r--r--  storage/tokudb/doc2/sysbench.update.ma10.tokudb754.png | bin 0 -> 7351 bytes
-rw-r--r--  storage/tokudb/doc2/sysbench.update.ma55.tokudb753.binlog.png | bin 0 -> 6723 bytes
-rw-r--r--  storage/tokudb/doc2/sysbench.update.ma55.tokudb753.loglog.png | bin 0 -> 7367 bytes
-rw-r--r--  storage/tokudb/doc2/sysbench.update.ma55.tokudb753.png | bin 0 -> 7561 bytes
-rw-r--r--  storage/tokudb/doc2/sysbench.update.my55.tokudb753.loglog.png | bin 0 -> 6899 bytes
-rw-r--r--  storage/tokudb/doc2/sysbench.update.ps56.tokudb754.loglog.png | bin 0 -> 7008 bytes
-rw-r--r--  storage/tokudb/doc2/sysbench.update.ps56.tokudb754.png | bin 0 -> 7434 bytes
-rw-r--r--  storage/tokudb/ft-index/buildheader/make_tdb.cc | 1
-rw-r--r--  storage/tokudb/ft-index/ft/ft-internal.h | 1
-rw-r--r--  storage/tokudb/ft-index/ft/ft-ops.cc | 12
-rw-r--r--  storage/tokudb/ft-index/locktree/lock_request.cc | 32
-rw-r--r--  storage/tokudb/ft-index/locktree/lock_request.h | 6
-rw-r--r--  storage/tokudb/ft-index/locktree/locktree.cc | 1
-rw-r--r--  storage/tokudb/ft-index/locktree/tests/lock_request_start_retry_race.cc | 193
-rw-r--r--  storage/tokudb/ft-index/src/ydb_row_lock.cc | 1
-rw-r--r--  storage/tokudb/ft-index/src/ydb_txn.cc | 6
-rw-r--r--  storage/tokudb/ha_tokudb.cc | 11
-rw-r--r--  storage/tokudb/ha_tokudb.h | 2
-rw-r--r--  storage/tokudb/ha_tokudb_alter_56.cc | 4
-rw-r--r--  storage/tokudb/hatoku_hton.cc | 46
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/change_column_varchar.result | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/information-schema-global-status.result | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_create_select.test | 8
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_create_select_hash_part.test | 8
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_create_select_range_part.test | 8
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_create_temp_select.test | 8
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_delete.test | 7
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_insert_select.test | 6
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_insert_select_dup_key.test | 8
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_replace_select.test | 7
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_select_hash_part.test | 8
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/bf_select_range_part.test | 8
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/change_column_varchar.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/disabled.def | 3
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/r/db762.result | 7
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/r/db766.result | 7
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/r/db768.result | 10
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/r/db771.result | 11
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/5585-master.opt | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db762.test | 13
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db766.test | 12
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db768.test | 12
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/db771.test | 13
-rw-r--r--  storage/tokudb/tokudb_update_fun.cc | 2
-rw-r--r--  storage/xtradb/btr/btr0cur.cc | 121
-rw-r--r--  storage/xtradb/buf/buf0buf.cc | 23
-rw-r--r--  storage/xtradb/buf/buf0flu.cc | 34
-rw-r--r--  storage/xtradb/dict/dict0dict.cc | 29
-rw-r--r--  storage/xtradb/fil/fil0fil.cc | 28
-rw-r--r--  storage/xtradb/fts/fts0fts.cc | 650
-rw-r--r--  storage/xtradb/fts/fts0opt.cc | 18
-rw-r--r--  storage/xtradb/handler/ha_innodb.cc | 53
-rw-r--r--  storage/xtradb/handler/handler0alter.cc | 9
-rw-r--r--  storage/xtradb/ibuf/ibuf0ibuf.cc | 4
-rw-r--r--  storage/xtradb/include/btr0cur.ic | 4
-rw-r--r--  storage/xtradb/include/buf0buf.h | 5
-rw-r--r--  storage/xtradb/include/dict0dict.h | 2
-rw-r--r--  storage/xtradb/include/fil0fil.h | 3
-rw-r--r--  storage/xtradb/include/fts0priv.ic | 26
-rw-r--r--  storage/xtradb/include/os0file.h | 1
-rw-r--r--  storage/xtradb/include/os0stacktrace.h | 4
-rw-r--r--  storage/xtradb/include/trx0rec.h | 2
-rw-r--r--  storage/xtradb/include/univ.i | 8
-rw-r--r--  storage/xtradb/lock/lock0lock.cc | 33
-rw-r--r--  storage/xtradb/log/log0log.cc | 18
-rw-r--r--  storage/xtradb/log/log0recv.cc | 3
-rw-r--r--  storage/xtradb/os/os0file.cc | 28
-rw-r--r--  storage/xtradb/os/os0thread.cc | 9
-rw-r--r--  storage/xtradb/page/page0zip.cc | 13
-rw-r--r--  storage/xtradb/row/row0mysql.cc | 8
-rw-r--r--  storage/xtradb/srv/srv0srv.cc | 9
-rw-r--r--  storage/xtradb/srv/srv0start.cc | 97
-rw-r--r--  storage/xtradb/sync/sync0rw.cc | 1
-rw-r--r--  support-files/mysql.spec.sh | 24
-rw-r--r--  support-files/rpm/server-postin.sh | 53
-rw-r--r--  support-files/rpm/server-postun.sh | 7
-rwxr-xr-x  tests/fork_big.pl | 2
-rw-r--r--  tests/fork_big2.pl | 2
-rw-r--r--  vio/viossl.c | 46
-rw-r--r--  vio/viosslfactories.c | 27
409 files changed, 13024 insertions, 2850 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 69fbbca3830..e681191fbbf 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -28,10 +28,12 @@ ENDIF()
# We use the LOCATION target property (CMP0026)
# and get_target_property() for non-existent targets (CMP0045)
+# and INSTALL_NAME_DIR (CMP0042)
IF(CMAKE_VERSION VERSION_EQUAL "3.0.0" OR
CMAKE_VERSION VERSION_GREATER "3.0.0")
CMAKE_POLICY(SET CMP0026 OLD)
CMAKE_POLICY(SET CMP0045 OLD)
+ CMAKE_POLICY(SET CMP0042 OLD)
ENDIF()
MESSAGE(STATUS "Running cmake version ${CMAKE_VERSION}")
@@ -108,36 +110,15 @@ FOREACH(_base
ENDFOREACH()
-
# Following autotools tradition, add preprocessor definitions
# specified in environment variable CPPFLAGS
IF(DEFINED ENV{CPPFLAGS})
ADD_DEFINITIONS($ENV{CPPFLAGS})
ENDIF()
-#
-# Control aspects of the development environment which are
-# specific to MySQL maintainers and developers.
-#
-INCLUDE(maintainer)
-
-SET(MYSQL_MAINTAINER_MODE "OFF" CACHE STRING "MySQL maintainer-specific development environment. Options are: ON OFF AUTO.")
+SET(MYSQL_MAINTAINER_MODE "AUTO" CACHE STRING "MySQL maintainer-specific development environment. Options are: ON OFF AUTO.")
MARK_AS_ADVANCED(MYSQL_MAINTAINER_MODE)
-# Whether the maintainer mode compiler options should be enabled.
-IF(CMAKE_C_COMPILER_ID MATCHES "GNU")
- SET_MYSQL_MAINTAINER_GNU_C_OPTIONS()
-ENDIF()
-IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
- SET_MYSQL_MAINTAINER_GNU_CXX_OPTIONS()
-ENDIF()
-IF(CMAKE_C_COMPILER_ID MATCHES "Intel")
- SET_MYSQL_MAINTAINER_INTEL_C_OPTIONS()
-ENDIF()
-IF(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
- SET_MYSQL_MAINTAINER_INTEL_CXX_OPTIONS()
-ENDIF()
-
# Packaging
IF (NOT CPACK_GENERATOR)
IF(WIN32)
@@ -186,7 +167,7 @@ OPTION (WITH_UNIT_TESTS "Compile MySQL with unit tests" ON)
MARK_AS_ADVANCED(CYBOZU BACKUP_TEST WITHOUT_SERVER DISABLE_SHARED)
OPTION(NOT_FOR_DISTRIBUTION "Allow linking with GPLv2-incompatible system libraries. Only set it you never plan to distribute the resulting binaries" OFF)
-
+
include(CheckCSourceCompiles)
include(CheckCXXSourceCompiles)
# We need some extra FAIL_REGEX patterns
@@ -396,18 +377,7 @@ CHECK_PCRE()
# We have to add MAINTAINER_C_WARNINGS first to ensure that the flags
# given by the invoking user are honored
#
-IF(MYSQL_MAINTAINER_MODE MATCHES "ON")
- SET(CMAKE_C_FLAGS "${MY_MAINTAINER_C_WARNINGS} ${CMAKE_C_FLAGS}")
- SET(CMAKE_CXX_FLAGS "${MY_MAINTAINER_CXX_WARNINGS} ${CMAKE_CXX_FLAGS}")
-ELSEIF(MYSQL_MAINTAINER_MODE MATCHES "AUTO")
- SET(CMAKE_C_FLAGS_DEBUG "${MY_MAINTAINER_C_WARNINGS} ${CMAKE_C_FLAGS_DEBUG}")
- SET(CMAKE_CXX_FLAGS_DEBUG "${MY_MAINTAINER_CXX_WARNINGS} ${CMAKE_CXX_FLAGS_DEBUG}")
-ENDIF()
-
-IF(CMAKE_CROSSCOMPILING)
- SET(IMPORT_EXECUTABLES "IMPORTFILE-NOTFOUND" CACHE FILEPATH "Path to import_executables.cmake from a native build")
- INCLUDE(${IMPORT_EXECUTABLES})
-ENDIF()
+INCLUDE(maintainer)
IF(WITH_UNIT_TESTS)
ENABLE_TESTING()
@@ -534,4 +504,3 @@ IF(NON_DISTRIBUTABLE_WARNING)
MESSAGE(WARNING "
You have linked MariaDB with GPLv3 libraries! You may not distribute the resulting binary. If you do, you will put yourself into a legal problem with Free Software Foundation.")
ENDIF()
-
diff --git a/VERSION b/VERSION
index 846e8094fff..ea92dd078e5 100644
--- a/VERSION
+++ b/VERSION
@@ -1,3 +1,3 @@
MYSQL_VERSION_MAJOR=10
MYSQL_VERSION_MINOR=0
-MYSQL_VERSION_PATCH=15
+MYSQL_VERSION_PATCH=16
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 319be9111aa..7c93e8d3ebd 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -1,6 +1,6 @@
/*
- Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2009, 2013, Monty Program Ab.
+ Copyright (c) 2000, 2014, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2014, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -2435,6 +2435,7 @@ int main(int argc, char** argv)
DBUG_PROCESS(argv[0]);
my_init_time(); // for time functions
+ tzset(); // set tzname
init_alloc_root(&s_mem_root, 16384, 0, MYF(0));
if (load_defaults("my", load_groups, &argc, &argv))
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 02a075cf9b4..9b7a6e8fd19 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -5908,7 +5908,7 @@ void do_connect(struct st_command *command)
{
int con_port= opt_port;
char *con_options;
- char *ssl_cipher= 0;
+ char *ssl_cipher __attribute__((unused))= 0;
my_bool con_ssl= 0, con_compress= 0;
my_bool con_pipe= 0;
my_bool con_shm __attribute__ ((unused))= 0;
diff --git a/cmake/build_configurations/mysql_release.cmake b/cmake/build_configurations/mysql_release.cmake
index 7e5ce563b15..02492f2dbc9 100644
--- a/cmake/build_configurations/mysql_release.cmake
+++ b/cmake/build_configurations/mysql_release.cmake
@@ -202,15 +202,16 @@ IF(UNIX)
ENDIF()
ENDIF()
- # OSX flags
- IF(APPLE)
- SET(COMMON_C_FLAGS "-g -fno-common -fno-strict-aliasing")
- # XXX: why are we using -felide-constructors on OSX?
- SET(COMMON_CXX_FLAGS "-g -fno-common -felide-constructors -fno-strict-aliasing")
- SET(CMAKE_C_FLAGS_DEBUG "-O ${COMMON_C_FLAGS}")
- SET(CMAKE_CXX_FLAGS_DEBUG "-O ${COMMON_CXX_FLAGS}")
- SET(CMAKE_C_FLAGS_RELWITHDEBINFO "-Os ${COMMON_C_FLAGS}")
- SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-Os ${COMMON_CXX_FLAGS}")
+ # Default Clang flags
+ IF(CMAKE_C_COMPILER_ID MATCHES "Clang")
+ SET(COMMON_C_FLAGS "-g -fno-omit-frame-pointer -fno-strict-aliasing")
+ SET(CMAKE_C_FLAGS_DEBUG "${COMMON_C_FLAGS}")
+ SET(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 ${COMMON_C_FLAGS}")
+ ENDIF()
+ IF(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ SET(COMMON_CXX_FLAGS "-g -fno-omit-frame-pointer -fno-strict-aliasing")
+ SET(CMAKE_CXX_FLAGS_DEBUG "${COMMON_CXX_FLAGS}")
+ SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 ${COMMON_CXX_FLAGS}")
ENDIF()
# Solaris flags
diff --git a/cmake/compile_flags.cmake b/cmake/compile_flags.cmake
new file mode 100644
index 00000000000..5e872f981b0
--- /dev/null
+++ b/cmake/compile_flags.cmake
@@ -0,0 +1,44 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+## ADD_COMPILE_FLAGS(<source files> COMPILE_FLAGS <flags>)
+MACRO(ADD_COMPILE_FLAGS)
+ SET(FILES "")
+ SET(FLAGS "")
+ SET(COMPILE_FLAGS)
+ FOREACH(ARG ${ARGV})
+ IF(ARG STREQUAL "COMPILE_FLAGS")
+ SET(COMPILE_FLAGS "COMPILE_FLAGS")
+ ELSEIF(COMPILE_FLAGS)
+ LIST(APPEND FLAGS ${ARG})
+ ELSE()
+ LIST(APPEND FILES ${ARG})
+ ENDIF()
+ ENDFOREACH()
+ FOREACH(FILE ${FILES})
+ FOREACH(FLAG ${FLAGS})
+ GET_SOURCE_FILE_PROPERTY(PROP ${FILE} COMPILE_FLAGS)
+ IF(NOT PROP)
+ SET(PROP ${FLAG})
+ ELSE()
+ SET(PROP "${PROP} ${FLAG}")
+ ENDIF()
+ SET_SOURCE_FILES_PROPERTIES(
+ ${FILE} PROPERTIES COMPILE_FLAGS "${PROP}"
+ )
+ ENDFOREACH()
+ ENDFOREACH()
+ENDMACRO()
diff --git a/cmake/cpack_rpm.cmake b/cmake/cpack_rpm.cmake
index f777db21015..acbb017d75c 100644
--- a/cmake/cpack_rpm.cmake
+++ b/cmake/cpack_rpm.cmake
@@ -189,6 +189,7 @@ ELSEIF(RPM MATCHES "fedora" OR RPM MATCHES "(rhel|centos)7")
ALTERNATIVE_NAME("client" "mysql")
ALTERNATIVE_NAME("devel" "mariadb-devel")
ALTERNATIVE_NAME("server" "mariadb-server")
+ ALTERNATIVE_NAME("server" "mysql-compat-server")
ALTERNATIVE_NAME("shared" "mariadb-libs")
ALTERNATIVE_NAME("shared" "mysql-libs")
ALTERNATIVE_NAME("test" "mariadb-test")
diff --git a/cmake/libutils.cmake b/cmake/libutils.cmake
index 8f85b988f8d..8fcfe294f17 100644
--- a/cmake/libutils.cmake
+++ b/cmake/libutils.cmake
@@ -304,6 +304,8 @@ FUNCTION(GET_DEPENDEND_OS_LIBS target result)
SET(${result} ${ret} PARENT_SCOPE)
ENDFUNCTION()
+INCLUDE(CheckCCompilerFlag)
+
SET(VISIBILITY_HIDDEN_FLAG)
IF(CMAKE_COMPILER_IS_GNUCXX AND UNIX)
diff --git a/cmake/maintainer.cmake b/cmake/maintainer.cmake
index 874e05b8a43..872d61d9aac 100644
--- a/cmake/maintainer.cmake
+++ b/cmake/maintainer.cmake
@@ -1,4 +1,4 @@
-# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -13,51 +13,46 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-INCLUDE(CheckCCompilerFlag)
-
-# Setup GCC (GNU C compiler) warning options.
-MACRO(SET_MYSQL_MAINTAINER_GNU_C_OPTIONS)
- SET(MY_MAINTAINER_WARNINGS
- "-Wall -Wextra -Wunused -Wwrite-strings -Wno-strict-aliasing -DFORCE_INIT_OF_VARS")
-
- CHECK_C_COMPILER_FLAG("-Wno-missing-field-initializers"
- HAVE_NO_MISSING_FIELD_INITIALIZERS)
-
- IF (HAVE_NO_MISSING_FIELD_INITIALIZERS)
- SET(MY_MAINTAINER_WARNINGS
- "${MY_MAINTAINER_WARNINGS} -Wno-missing-field-initializers")
- ENDIF()
-
- CHECK_C_COMPILER_FLAG("-Wdeclaration-after-statement"
- HAVE_DECLARATION_AFTER_STATEMENT)
- IF(HAVE_DECLARATION_AFTER_STATEMENT)
- SET(MY_MAINTAINER_DECLARATION_AFTER_STATEMENT
- "-Wdeclaration-after-statement")
- ENDIF()
- SET(MY_MAINTAINER_C_WARNINGS
- "${MY_MAINTAINER_WARNINGS} ${MY_MAINTAINER_DECLARATION_AFTER_STATEMENT}"
- CACHE INTERNAL "C warning options used in maintainer builds.")
- # Do not make warnings in checks into errors.
- SET(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Wno-error")
-ENDMACRO()
-
-# Setup G++ (GNU C++ compiler) warning options.
-MACRO(SET_MYSQL_MAINTAINER_GNU_CXX_OPTIONS)
- SET(MY_MAINTAINER_CXX_WARNINGS
- "${MY_MAINTAINER_WARNINGS} -Wno-invalid-offsetof -Wno-unused-parameter -Woverloaded-virtual"
- CACHE INTERNAL "C++ warning options used in maintainer builds.")
-ENDMACRO()
-
-# Setup ICC (Intel C Compiler) warning options.
-MACRO(SET_MYSQL_MAINTAINER_INTEL_C_OPTIONS)
- SET(MY_MAINTAINER_WARNINGS "-Wcheck")
- SET(MY_MAINTAINER_C_WARNINGS "${MY_MAINTAINER_WARNINGS}"
- CACHE INTERNAL "C warning options used in maintainer builds.")
-ENDMACRO()
-
-# Setup ICPC (Intel C++ Compiler) warning options.
-MACRO(SET_MYSQL_MAINTAINER_INTEL_CXX_OPTIONS)
- SET(MY_MAINTAINER_CXX_WARNINGS "${MY_MAINTAINER_WARNINGS}"
- CACHE INTERNAL "C++ warning options used in maintainer builds.")
-ENDMACRO()
-
+# Common warning flags for GCC, G++, Clang and Clang++
+SET(MY_WARNING_FLAGS "-Wall -Wextra -Wformat-security")
+MY_CHECK_C_COMPILER_FLAG("-Wvla" HAVE_WVLA) # Requires GCC 4.3+ or Clang
+IF(HAVE_WVLA)
+ SET(MY_WARNING_FLAGS "${MY_WARNING_FLAGS} -Wvla")
+ENDIF()
+
+# Common warning flags for GCC and Clang
+SET(MY_C_WARNING_FLAGS
+ "${MY_WARNING_FLAGS} -Wwrite-strings -Wdeclaration-after-statement")
+
+# Common warning flags for G++ and Clang++
+SET(MY_CXX_WARNING_FLAGS
+ "${MY_WARNING_FLAGS} -Woverloaded-virtual -Wno-unused-parameter")
+
+# Extra warning flags for Clang++
+IF(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ SET(MY_CXX_WARNING_FLAGS
+ "${MY_CXX_WARNING_FLAGS} -Wno-null-conversion -Wno-unused-private-field")
+ENDIF()
+
+# Turn on Werror (warning => error) when using maintainer mode.
+IF(MYSQL_MAINTAINER_MODE MATCHES "ON")
+ SET(MY_C_WARNING_FLAGS "${MY_C_WARNING_FLAGS} -DFORCE_INIT_OF_VARS -Werror")
+ SET(MY_CXX_WARNING_FLAGS "${MY_CXX_WARNING_FLAGS} -DFORCE_INIT_OF_VARS -Werror")
+ENDIF()
+
+# Set warning flags for GCC/Clang
+IF(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
+ SET(MY_MAINTAINER_C_WARNINGS "${MY_C_WARNING_FLAGS}")
+ENDIF()
+# Set warning flags for G++/Clang++
+IF(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ SET(MY_MAINTAINER_CXX_WARNINGS "${MY_CXX_WARNING_FLAGS}")
+ENDIF()
+
+IF(MYSQL_MAINTAINER_MODE MATCHES "ON")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_MAINTAINER_C_WARNINGS}")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_MAINTAINER_CXX_WARNINGS}")
+ELSEIF(MYSQL_MAINTAINER_MODE MATCHES "AUTO")
+ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${MY_MAINTAINER_C_WARNINGS}")
+ SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${MY_MAINTAINER_CXX_WARNINGS}")
+ENDIF()
diff --git a/cmake/os/Darwin.cmake b/cmake/os/Darwin.cmake
index 0d8bac106f0..4ac72070e8b 100644
--- a/cmake/os/Darwin.cmake
+++ b/cmake/os/Darwin.cmake
@@ -1,5 +1,4 @@
-# Copyright (c) 2010 Sun Microsystems, Inc.
-# Use is subject to license terms.
+# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,21 +14,3 @@
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This file includes OSX specific options and quirks, related to system checks
-
-# Workaround for CMake bug#9051
-# (CMake does not pass CMAKE_OSX_SYSROOT and CMAKE_OSX_DEPLOYMENT_TARGET when
-# running TRY_COMPILE)
-
-IF(CMAKE_OSX_SYSROOT)
- SET(ENV{CMAKE_OSX_SYSROOT} ${CMAKE_OSX_SYSROOT})
-ENDIF()
-IF(CMAKE_OSX_SYSROOT)
- SET(ENV{MACOSX_DEPLOYMENT_TARGET} ${OSX_DEPLOYMENT_TARGET})
-ENDIF()
-
-IF(CMAKE_OSX_DEPLOYMENT_TARGET)
- # Workaround linker problems on OSX 10.4
- IF(CMAKE_OSX_DEPLOYMENT_TARGET VERSION_LESS "10.5")
- ADD_DEFINITIONS(-fno-common)
- ENDIF()
-ENDIF()
diff --git a/cmake/os/WindowsCache.cmake b/cmake/os/WindowsCache.cmake
index 1453cc55053..c75f2e45bf6 100644
--- a/cmake/os/WindowsCache.cmake
+++ b/cmake/os/WindowsCache.cmake
@@ -1,4 +1,4 @@
-# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -90,7 +90,7 @@ SET(HAVE_GETRLIMIT CACHE INTERNAL "")
SET(HAVE_GETRUSAGE CACHE INTERNAL "")
SET(HAVE_GETTIMEOFDAY CACHE INTERNAL "")
SET(HAVE_GETWD CACHE INTERNAL "")
-SET(HAVE_GMTIME_R CACHE INTERNAL "")
+SET(HAVE_GMTIME_R 1 CACHE INTERNAL "")
SET(HAVE_GRP_H CACHE INTERNAL "")
SET(HAVE_IA64INTRIN_H CACHE INTERNAL "")
SET(HAVE_IEEEFP_H CACHE INTERNAL "")
@@ -111,7 +111,7 @@ SET(HAVE_LANGINFO_H CACHE INTERNAL "")
SET(HAVE_LDIV 1 CACHE INTERNAL "")
SET(HAVE_LIMITS_H 1 CACHE INTERNAL "")
SET(HAVE_LOCALE_H 1 CACHE INTERNAL "")
-SET(HAVE_LOCALTIME_R CACHE INTERNAL "")
+SET(HAVE_LOCALTIME_R 1 CACHE INTERNAL "")
SET(HAVE_LOG2 CACHE INTERNAL "")
SET(HAVE_LONGJMP 1 CACHE INTERNAL "")
SET(HAVE_LRAND48 CACHE INTERNAL "")
diff --git a/configure.cmake b/configure.cmake
index d16a82a2309..215b0dc32b4 100644
--- a/configure.cmake
+++ b/configure.cmake
@@ -52,16 +52,6 @@ IF(NOT SYSTEM_TYPE)
ENDIF()
ENDIF()
-
-# Always enable -Wall for gnu C/C++
-IF(CMAKE_COMPILER_IS_GNUCXX AND NOT CMAKE_CXX_FLAGS MATCHES ".*-Wall.*")
- SET(CMAKE_CXX_FLAGS "-Wall ${CMAKE_CXX_FLAGS} -Wall -Wno-unused-parameter")
-ENDIF()
-IF(CMAKE_COMPILER_IS_GNUCC AND NOT CMAKE_C_FLAGS MATCHES ".*-Wall.*")
- SET(CMAKE_C_FLAGS "-Wall ${CMAKE_C_FLAGS} -Wall")
-ENDIF()
-
-
IF(CMAKE_COMPILER_IS_GNUCXX)
# MySQL "canonical" GCC flags. At least -fno-rtti flag affects
# ABI and cannot be simply removed.
diff --git a/extra/CMakeLists.txt b/extra/CMakeLists.txt
index f8f71b00743..585b5aef6f6 100644
--- a/extra/CMakeLists.txt
+++ b/extra/CMakeLists.txt
@@ -75,11 +75,29 @@ ENDIF()
MYSQL_ADD_EXECUTABLE(replace replace.c COMPONENT Server)
TARGET_LINK_LIBRARIES(replace mysys)
IF(UNIX)
- MYSQL_ADD_EXECUTABLE(innochecksum innochecksum.c)
-
MYSQL_ADD_EXECUTABLE(resolve_stack_dump resolve_stack_dump.c)
TARGET_LINK_LIBRARIES(resolve_stack_dump mysys)
MYSQL_ADD_EXECUTABLE(mysql_waitpid mysql_waitpid.c COMPONENT Client)
TARGET_LINK_LIBRARIES(mysql_waitpid mysys)
ENDIF()
+
+
+ # Add path to the InnoDB headers
+ INCLUDE_DIRECTORIES(
+ ${CMAKE_SOURCE_DIR}/storage/innobase/include
+ ${CMAKE_SOURCE_DIR}/sql)
+
+ # We use the InnoDB code directly in case the code changes.
+ ADD_DEFINITIONS("-DUNIV_INNOCHECKSUM")
+ SET(INNOBASE_SOURCES
+ ../storage/innobase/buf/buf0checksum.cc
+ ../storage/innobase/ut/ut0crc32.cc
+ ../storage/innobase/ut/ut0ut.cc
+ ../storage/innobase/page/page0zip.cc
+ )
+
+ MYSQL_ADD_EXECUTABLE(innochecksum innochecksum.cc ${INNOBASE_SOURCES})
+ TARGET_LINK_LIBRARIES(innochecksum mysys mysys_ssl)
+ ADD_DEPENDENCIES(innochecksum GenError)
+
diff --git a/extra/innochecksum.c b/extra/innochecksum.c
deleted file mode 100644
index ed4dfc48789..00000000000
--- a/extra/innochecksum.c
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- Copyright (c) 2005, 2011, Oracle and/or its affiliates
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-*/
-
-/*
- InnoDB offline file checksum utility. 85% of the code in this file
- was taken wholesale fron the InnoDB codebase.
-
- The final 15% was originally written by Mark Smith of Danga
- Interactive, Inc. <junior@danga.com>
-
- Published with a permission.
-*/
-
-#include <my_global.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <time.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-/* all of these ripped from InnoDB code from MySQL 4.0.22 */
-#define UT_HASH_RANDOM_MASK 1463735687
-#define UT_HASH_RANDOM_MASK2 1653893711
-#define FIL_PAGE_LSN 16
-#define FIL_PAGE_FILE_FLUSH_LSN 26
-#define FIL_PAGE_OFFSET 4
-#define FIL_PAGE_DATA 38
-#define FIL_PAGE_END_LSN_OLD_CHKSUM 8
-#define FIL_PAGE_SPACE_OR_CHKSUM 0
-#define UNIV_PAGE_SIZE (2 * 8192)
-
-/* command line argument to do page checks (that's it) */
-/* another argument to specify page ranges... seek to right spot and go from there */
-
-typedef unsigned long int ulint;
-
-/* innodb function in name; modified slightly to not have the ASM version (lots of #ifs that didn't apply) */
-ulint mach_read_from_4(uchar *b)
-{
- return( ((ulint)(b[0]) << 24)
- + ((ulint)(b[1]) << 16)
- + ((ulint)(b[2]) << 8)
- + (ulint)(b[3])
- );
-}
-
-ulint
-ut_fold_ulint_pair(
-/*===============*/
- /* out: folded value */
- ulint n1, /* in: ulint */
- ulint n2) /* in: ulint */
-{
- return(((((n1 ^ n2 ^ UT_HASH_RANDOM_MASK2) << 8) + n1)
- ^ UT_HASH_RANDOM_MASK) + n2);
-}
-
-ulint
-ut_fold_binary(
-/*===========*/
- /* out: folded value */
- uchar* str, /* in: string of bytes */
- ulint len) /* in: length */
-{
- ulint i;
- ulint fold= 0;
-
- for (i= 0; i < len; i++)
- {
- fold= ut_fold_ulint_pair(fold, (ulint)(*str));
-
- str++;
- }
-
- return(fold);
-}
-
-ulint
-buf_calc_page_new_checksum(
-/*=======================*/
- /* out: checksum */
- uchar* page) /* in: buffer page */
-{
- ulint checksum;
-
- /* Since the fields FIL_PAGE_FILE_FLUSH_LSN and ..._ARCH_LOG_NO
- are written outside the buffer pool to the first pages of data
- files, we have to skip them in the page checksum calculation.
- We must also skip the field FIL_PAGE_SPACE_OR_CHKSUM where the
- checksum is stored, and also the last 8 bytes of page because
- there we store the old formula checksum. */
-
- checksum= ut_fold_binary(page + FIL_PAGE_OFFSET,
- FIL_PAGE_FILE_FLUSH_LSN - FIL_PAGE_OFFSET)
- + ut_fold_binary(page + FIL_PAGE_DATA,
- UNIV_PAGE_SIZE - FIL_PAGE_DATA
- - FIL_PAGE_END_LSN_OLD_CHKSUM);
- checksum= checksum & 0xFFFFFFFF;
-
- return(checksum);
-}
-
-ulint
-buf_calc_page_old_checksum(
-/*=======================*/
- /* out: checksum */
- uchar* page) /* in: buffer page */
-{
- ulint checksum;
-
- checksum= ut_fold_binary(page, FIL_PAGE_FILE_FLUSH_LSN);
-
- checksum= checksum & 0xFFFFFFFF;
-
- return(checksum);
-}
-
-
-int main(int argc, char **argv)
-{
- FILE *f; /* our input file */
- uchar *p; /* storage of pages read */
- int bytes; /* bytes read count */
- ulint ct; /* current page number (0 based) */
- int now; /* current time */
- int lastt; /* last time */
- ulint oldcsum, oldcsumfield, csum, csumfield, logseq, logseqfield; /* ulints for checksum storage */
- struct stat st; /* for stat, if you couldn't guess */
- unsigned long long int size; /* size of file (has to be 64 bits) */
- ulint pages; /* number of pages in file */
- ulint start_page= 0, end_page= 0, use_end_page= 0; /* for starting and ending at certain pages */
- off_t offset= 0;
- int just_count= 0; /* if true, just print page count */
- int verbose= 0;
- int debug= 0;
- int c;
- int fd;
-
- /* remove arguments */
- while ((c= getopt(argc, argv, "cvds:e:p:")) != -1)
- {
- switch (c)
- {
- case 'v':
- verbose= 1;
- break;
- case 'c':
- just_count= 1;
- break;
- case 's':
- start_page= atoi(optarg);
- break;
- case 'e':
- end_page= atoi(optarg);
- use_end_page= 1;
- break;
- case 'p':
- start_page= atoi(optarg);
- end_page= atoi(optarg);
- use_end_page= 1;
- break;
- case 'd':
- debug= 1;
- break;
- case ':':
- fprintf(stderr, "option -%c requires an argument\n", optopt);
- return 1;
- break;
- case '?':
- fprintf(stderr, "unrecognized option: -%c\n", optopt);
- return 1;
- break;
- }
- }
-
- /* debug implies verbose... */
- if (debug) verbose= 1;
-
- /* make sure we have the right arguments */
- if (optind >= argc)
- {
- printf("InnoDB offline file checksum utility.\n");
- printf("usage: %s [-c] [-s <start page>] [-e <end page>] [-p <page>] [-v] [-d] <filename>\n", argv[0]);
- printf("\t-c\tprint the count of pages in the file\n");
- printf("\t-s n\tstart on this page number (0 based)\n");
- printf("\t-e n\tend at this page number (0 based)\n");
- printf("\t-p n\tcheck only this page (0 based)\n");
- printf("\t-v\tverbose (prints progress every 5 seconds)\n");
- printf("\t-d\tdebug mode (prints checksums for each page)\n");
- return 1;
- }
-
- /* stat the file to get size and page count */
- if (stat(argv[optind], &st))
- {
- perror("error statting file");
- return 1;
- }
- size= st.st_size;
- pages= size / UNIV_PAGE_SIZE;
- if (just_count)
- {
- printf("%lu\n", pages);
- return 0;
- }
- else if (verbose)
- {
- printf("file %s = %llu bytes (%lu pages)...\n", argv[optind], size, pages);
- printf("checking pages in range %lu to %lu\n", start_page, use_end_page ? end_page : (pages - 1));
- }
-
- /* open the file for reading */
- f= fopen(argv[optind], "r");
- if (!f)
- {
- perror("error opening file");
- return 1;
- }
-
- /* seek to the necessary position */
- if (start_page)
- {
- fd= fileno(f);
- if (!fd)
- {
- perror("unable to obtain file descriptor number");
- return 1;
- }
-
- offset= (off_t)start_page * (off_t)UNIV_PAGE_SIZE;
-
- if (lseek(fd, offset, SEEK_SET) != offset)
- {
- perror("unable to seek to necessary offset");
- return 1;
- }
- }
-
- /* allocate buffer for reading (so we don't realloc every time) */
- p= (uchar *)malloc(UNIV_PAGE_SIZE);
-
- /* main checksumming loop */
- ct= start_page;
- lastt= 0;
- while (!feof(f))
- {
- bytes= fread(p, 1, UNIV_PAGE_SIZE, f);
- if (!bytes && feof(f)) return 0;
- if (bytes != UNIV_PAGE_SIZE)
- {
- fprintf(stderr, "bytes read (%d) doesn't match universal page size (%d)\n", bytes, UNIV_PAGE_SIZE);
- return 1;
- }
-
- /* check the "stored log sequence numbers" */
- logseq= mach_read_from_4(p + FIL_PAGE_LSN + 4);
- logseqfield= mach_read_from_4(p + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM + 4);
- if (debug)
- printf("page %lu: log sequence number: first = %lu; second = %lu\n", ct, logseq, logseqfield);
- if (logseq != logseqfield)
- {
- fprintf(stderr, "page %lu invalid (fails log sequence number check)\n", ct);
- return 1;
- }
-
- /* check old method of checksumming */
- oldcsum= buf_calc_page_old_checksum(p);
- oldcsumfield= mach_read_from_4(p + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM);
- if (debug)
- printf("page %lu: old style: calculated = %lu; recorded = %lu\n", ct, oldcsum, oldcsumfield);
- if (oldcsumfield != mach_read_from_4(p + FIL_PAGE_LSN) && oldcsumfield != oldcsum)
- {
- fprintf(stderr, "page %lu invalid (fails old style checksum)\n", ct);
- return 1;
- }
-
- /* now check the new method */
- csum= buf_calc_page_new_checksum(p);
- csumfield= mach_read_from_4(p + FIL_PAGE_SPACE_OR_CHKSUM);
- if (debug)
- printf("page %lu: new style: calculated = %lu; recorded = %lu\n", ct, csum, csumfield);
- if (csumfield != 0 && csum != csumfield)
- {
- fprintf(stderr, "page %lu invalid (fails new style checksum)\n", ct);
- return 1;
- }
-
- /* end if this was the last page we were supposed to check */
- if (use_end_page && (ct >= end_page))
- return 0;
-
- /* do counter increase and progress printing */
- ct++;
- if (verbose)
- {
- if (ct % 64 == 0)
- {
- now= time(0);
- if (!lastt) lastt= now;
- if (now - lastt >= 1)
- {
- printf("page %lu okay: %.3f%% done\n", (ct - 1), (float) ct / pages * 100);
- lastt= now;
- }
- }
- }
- }
- return 0;
-}
-
diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc
new file mode 100644
index 00000000000..5a0f7c630d3
--- /dev/null
+++ b/extra/innochecksum.cc
@@ -0,0 +1,470 @@
+/*
+ Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+/*
+ InnoDB offline file checksum utility. 85% of the code in this utility
+ is included from the InnoDB codebase.
+
+ The final 15% was originally written by Mark Smith of Danga
+ Interactive, Inc. <junior@danga.com>
+
+ Published with a permission.
+*/
+
+#include <my_global.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifndef __WIN__
+# include <unistd.h>
+#endif
+#include <my_getopt.h>
+#include <m_string.h>
+#include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */
+#include <string.h>
+
+/* Only parts of these files are included from the InnoDB codebase.
+The parts not included are excluded by #ifndef UNIV_INNOCHECKSUM. */
+
+#include "univ.i" /* include all of this */
+
+#define FLST_NODE_SIZE (2 * FIL_ADDR_SIZE)
+#define FSEG_PAGE_DATA FIL_PAGE_DATA
+
+#include "ut0ut.h"
+#include "ut0byte.h"
+#include "mach0data.h"
+#include "fsp0types.h"
+#include "rem0rec.h"
+#include "buf0checksum.h" /* buf_calc_page_*() */
+#include "fil0fil.h" /* FIL_* */
+#include "page0page.h" /* PAGE_* */
+#include "page0zip.h" /* page_zip_*() */
+#include "trx0undo.h" /* TRX_* */
+#include "fsp0fsp.h" /* fsp_flags_get_page_size() &
+ fsp_flags_get_zip_size() */
+#include "mach0data.h" /* mach_read_from_4() */
+#include "ut0crc32.h" /* ut_crc32_init() */
+
+#ifdef UNIV_NONINL
+# include "fsp0fsp.ic"
+# include "mach0data.ic"
+# include "ut0rnd.ic"
+#endif
+
+/* Global variables */
+static my_bool verbose;
+static my_bool debug;
+static my_bool skip_corrupt;
+static my_bool just_count;
+static ulong start_page;
+static ulong end_page;
+static ulong do_page;
+static my_bool use_end_page;
+static my_bool do_one_page;
+ulong srv_page_size; /* replaces declaration in srv0srv.c */
+static ulong physical_page_size; /* Page size in bytes on disk. */
+static ulong logical_page_size; /* Page size when uncompressed. */
+static bool compressed= false; /* Is tablespace compressed */
+
+/* Get the page size of the filespace from the filespace header. */
+static
+my_bool
+get_page_size(
+/*==========*/
+ FILE* f, /*!< in: file pointer, must be open
+ and set to start of file */
+ byte* buf, /*!< in: buffer used to read the page */
+ ulong* logical_page_size, /*!< out: Logical/Uncompressed page size */
+ ulong* physical_page_size) /*!< out: Physical/Commpressed page size */
+{
+ ulong flags;
+
+ int bytes= fread(buf, 1, UNIV_PAGE_SIZE_MIN, f);
+
+ if (ferror(f))
+ {
+ perror("Error reading file header");
+ return FALSE;
+ }
+
+ if (bytes != UNIV_PAGE_SIZE_MIN)
+ {
+ fprintf(stderr, "Error; Was not able to read the minimum page size ");
+ fprintf(stderr, "of %d bytes. Bytes read was %d\n", UNIV_PAGE_SIZE_MIN, bytes);
+ return FALSE;
+ }
+
+ rewind(f);
+
+ flags = mach_read_from_4(buf + FIL_PAGE_DATA + FSP_SPACE_FLAGS);
+
+ /* srv_page_size is used by InnoDB code as UNIV_PAGE_SIZE */
+ srv_page_size = *logical_page_size = fsp_flags_get_page_size(flags);
+
+ /* fsp_flags_get_zip_size() will return zero if not compressed. */
+ *physical_page_size = fsp_flags_get_zip_size(flags);
+ if (*physical_page_size == 0)
+ {
+ *physical_page_size= *logical_page_size;
+ }
+ else
+ {
+ compressed= true;
+ }
+ return TRUE;
+}
+
+
+/* command line argument to do page checks (that's it) */
+/* another argument to specify page ranges... seek to right spot and go from there */
+
+static struct my_option innochecksum_options[] =
+{
+ {"help", '?', "Displays this help and exits.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"info", 'I', "Synonym for --help.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"version", 'V', "Displays version information and exits.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"verbose", 'v', "Verbose (prints progress every 5 seconds).",
+ &verbose, &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"debug", 'd', "Debug mode (prints checksums for each page, implies verbose).",
+ &debug, &debug, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"skip_corrupt", 'u', "Skip corrupt pages.",
+ &skip_corrupt, &skip_corrupt, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"count", 'c', "Print the count of pages in the file.",
+ &just_count, &just_count, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"start_page", 's', "Start on this page number (0 based).",
+ &start_page, &start_page, 0, GET_ULONG, REQUIRED_ARG,
+ 0, 0, (longlong) 2L*1024L*1024L*1024L, 0, 1, 0},
+ {"end_page", 'e', "End at this page number (0 based).",
+ &end_page, &end_page, 0, GET_ULONG, REQUIRED_ARG,
+ 0, 0, (longlong) 2L*1024L*1024L*1024L, 0, 1, 0},
+ {"page", 'p', "Check only this page (0 based).",
+ &do_page, &do_page, 0, GET_ULONG, REQUIRED_ARG,
+ 0, 0, (longlong) 2L*1024L*1024L*1024L, 0, 1, 0},
+ {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+static void print_version(void)
+{
+ printf("%s Ver %s, for %s (%s)\n",
+ my_progname, INNODB_VERSION_STR,
+ SYSTEM_TYPE, MACHINE_TYPE);
+}
+
+static void usage(void)
+{
+ print_version();
+ puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
+ printf("InnoDB offline file checksum utility.\n");
+ printf("Usage: %s [-c] [-s <start page>] [-e <end page>] [-p <page>] [-v] [-d] <filename>\n", my_progname);
+ my_print_help(innochecksum_options);
+ my_print_variables(innochecksum_options);
+}
+
+extern "C" my_bool
+innochecksum_get_one_option(
+/*========================*/
+ int optid,
+ const struct my_option *opt __attribute__((unused)),
+ char *argument __attribute__((unused)))
+{
+ switch (optid) {
+ case 'd':
+ verbose=1; /* debug implies verbose... */
+ break;
+ case 'e':
+ use_end_page= 1;
+ break;
+ case 'p':
+ end_page= start_page= do_page;
+ use_end_page= 1;
+ do_one_page= 1;
+ break;
+ case 'V':
+ print_version();
+ exit(0);
+ break;
+ case 'I':
+ case '?':
+ usage();
+ exit(0);
+ break;
+ }
+ return 0;
+}
+
+static int get_options(
+/*===================*/
+ int *argc,
+ char ***argv)
+{
+ int ho_error;
+
+ if ((ho_error=handle_options(argc, argv, innochecksum_options, innochecksum_get_one_option)))
+ exit(ho_error);
+
+ /* The next arg must be the filename */
+ if (!*argc)
+ {
+ usage();
+ return 1;
+ }
+ return 0;
+} /* get_options */
+
+int main(int argc, char **argv)
+{
+ FILE* f; /* our input file */
+ char* filename; /* our input filename. */
+ unsigned char *big_buf, *buf;
+
+ ulong bytes; /* bytes read count */
+ ulint ct; /* current page number (0 based) */
+ time_t now; /* current time */
+ time_t lastt; /* last time */
+ ulint oldcsum, oldcsumfield, csum, csumfield, crc32, logseq, logseqfield;
+ /* ulints for checksum storage */
+ struct stat st; /* for stat, if you couldn't guess */
+ unsigned long long int size; /* size of file (has to be 64 bits) */
+ ulint pages; /* number of pages in file */
+ off_t offset= 0;
+ int fd;
+
+ printf("InnoDB offline file checksum utility.\n");
+
+ ut_crc32_init();
+
+ MY_INIT(argv[0]);
+
+ if (get_options(&argc,&argv))
+ exit(1);
+
+ if (verbose)
+ my_print_variables(innochecksum_options);
+
+ /* The file name is not optional */
+ filename = *argv;
+ if (*filename == '\0')
+ {
+ fprintf(stderr, "Error; File name missing\n");
+ return 1;
+ }
+
+ /* stat the file to get size and page count */
+ if (stat(filename, &st))
+ {
+ fprintf(stderr, "Error; %s cannot be found\n", filename);
+ return 1;
+ }
+ size= st.st_size;
+
+ /* Open the file for reading */
+ f= fopen(filename, "rb");
+ if (f == NULL)
+ {
+ fprintf(stderr, "Error; %s cannot be opened", filename);
+ perror(" ");
+ return 1;
+ }
+
+ big_buf = (unsigned char *)malloc(2 * UNIV_PAGE_SIZE_MAX);
+ if (big_buf == NULL)
+ {
+ fprintf(stderr, "Error; failed to allocate memory\n");
+ perror("");
+ return 1;
+ }
+
+ /* Make sure the page is aligned */
+ buf = (unsigned char*)ut_align_down(big_buf
+ + UNIV_PAGE_SIZE_MAX, UNIV_PAGE_SIZE_MAX);
+
+ if (!get_page_size(f, buf, &logical_page_size, &physical_page_size))
+ {
+ free(big_buf);
+ return 1;
+ }
+
+ if (compressed)
+ {
+ printf("Table is compressed\n");
+ printf("Key block size is %lu\n", physical_page_size);
+ }
+ else
+ {
+ printf("Table is uncompressed\n");
+ printf("Page size is %lu\n", physical_page_size);
+ }
+
+ pages= (ulint) (size / physical_page_size);
+
+ if (just_count)
+ {
+ if (verbose)
+ printf("Number of pages: ");
+ printf("%lu\n", pages);
+ free(big_buf);
+ return 0;
+ }
+ else if (verbose)
+ {
+ printf("file %s = %llu bytes (%lu pages)...\n", filename, size, pages);
+ if (do_one_page)
+ printf("InnoChecksum; checking page %lu\n", do_page);
+ else
+ printf("InnoChecksum; checking pages in range %lu to %lu\n", start_page, use_end_page ? end_page : (pages - 1));
+ }
+
+#ifdef UNIV_LINUX
+ if (posix_fadvise(fileno(f), 0, 0, POSIX_FADV_SEQUENTIAL) ||
+ posix_fadvise(fileno(f), 0, 0, POSIX_FADV_NOREUSE))
+ {
+ perror("posix_fadvise failed");
+ }
+#endif
+
+ /* seek to the necessary position */
+ if (start_page)
+ {
+ fd= fileno(f);
+ if (!fd)
+ {
+ perror("Error; Unable to obtain file descriptor number");
+ free(big_buf);
+ return 1;
+ }
+
+ offset= (off_t)start_page * (off_t)physical_page_size;
+
+ if (lseek(fd, offset, SEEK_SET) != offset)
+ {
+ perror("Error; Unable to seek to necessary offset");
+ free(big_buf);
+ return 1;
+ }
+ }
+
+ /* main checksumming loop */
+ ct= start_page;
+ lastt= 0;
+ while (!feof(f))
+ {
+ bytes= fread(buf, 1, physical_page_size, f);
+ if (!bytes && feof(f))
+ {
+ free(big_buf);
+ return 0;
+ }
+
+ if (ferror(f))
+ {
+ fprintf(stderr, "Error reading %lu bytes", physical_page_size);
+ perror(" ");
+ free(big_buf);
+ return 1;
+ }
+
+ if (compressed) {
+ /* compressed pages */
+ if (!page_zip_verify_checksum(buf, physical_page_size)) {
+ fprintf(stderr, "Fail; page %lu invalid (fails compressed page checksum).\n", ct);
+ if (!skip_corrupt)
+ {
+ free(big_buf);
+ return 1;
+ }
+ }
+ } else {
+
+ /* check the "stored log sequence numbers" */
+ logseq= mach_read_from_4(buf + FIL_PAGE_LSN + 4);
+ logseqfield= mach_read_from_4(buf + logical_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM + 4);
+ if (debug)
+ printf("page %lu: log sequence number: first = %lu; second = %lu\n", ct, logseq, logseqfield);
+ if (logseq != logseqfield)
+ {
+ fprintf(stderr, "Fail; page %lu invalid (fails log sequence number check)\n", ct);
+ if (!skip_corrupt)
+ {
+ free(big_buf);
+ return 1;
+ }
+ }
+
+ /* check old method of checksumming */
+ oldcsum= buf_calc_page_old_checksum(buf);
+ oldcsumfield= mach_read_from_4(buf + logical_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM);
+ if (debug)
+ printf("page %lu: old style: calculated = %lu; recorded = %lu\n", ct, oldcsum, oldcsumfield);
+ if (oldcsumfield != mach_read_from_4(buf + FIL_PAGE_LSN) && oldcsumfield != oldcsum)
+ {
+ fprintf(stderr, "Fail; page %lu invalid (fails old style checksum)\n", ct);
+ if (!skip_corrupt)
+ {
+ free(big_buf);
+ return 1;
+ }
+ }
+
+ /* now check the new method */
+ csum= buf_calc_page_new_checksum(buf);
+ crc32= buf_calc_page_crc32(buf);
+ csumfield= mach_read_from_4(buf + FIL_PAGE_SPACE_OR_CHKSUM);
+ if (debug)
+ printf("page %lu: new style: calculated = %lu; crc32 = %lu; recorded = %lu\n",
+ ct, csum, crc32, csumfield);
+ if (csumfield != 0 && crc32 != csumfield && csum != csumfield)
+ {
+ fprintf(stderr, "Fail; page %lu invalid (fails innodb and crc32 checksum)\n", ct);
+ if (!skip_corrupt)
+ {
+ free(big_buf);
+ return 1;
+ }
+ }
+ }
+ /* end if this was the last page we were supposed to check */
+ if (use_end_page && (ct >= end_page))
+ {
+ free(big_buf);
+ return 0;
+ }
+
+ /* do counter increase and progress printing */
+ ct++;
+ if (verbose)
+ {
+ if (ct % 64 == 0)
+ {
+ now= time(0);
+ if (!lastt) lastt= now;
+ if (now - lastt >= 1)
+ {
+ printf("page %lu okay: %.3f%% done\n", (ct - 1), (float) ct / pages * 100);
+ lastt= now;
+ }
+ }
+ }
+ }
+ free(big_buf);
+ return 0;
+}
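
The loop above accepts an uncompressed page only when its two stored LSN halves agree and both the old-style and the new-style (innodb or crc32) checksum tests pass. A minimal standalone sketch of that accept logic follows; the three checksum calculators are InnoDB internals, so they are injected as callbacks here, and the field offsets (FIL_PAGE_SPACE_OR_CHKSUM = 0, FIL_PAGE_LSN = 16, an 8-byte page trailer) are assumptions taken from InnoDB's page layout (fil0fil.h), not something this patch itself guarantees.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

/* Big-endian 32-bit read, standing in for InnoDB's mach_read_from_4(). */
static uint32_t read_be32(const unsigned char *p)
{
  return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16) |
         ((uint32_t) p[2] << 8)  |  (uint32_t) p[3];
}

typedef uint32_t (*page_csum_fn)(const unsigned char *page);

/*
  True if an uncompressed page passes the same three tests as the loop
  above: matching stored LSN halves, a valid old-style checksum, and a
  valid new-style (innodb or crc32) checksum.
*/
static bool page_looks_valid(const unsigned char *page, size_t page_size,
                             page_csum_fn calc_old, page_csum_fn calc_new,
                             page_csum_fn calc_crc32)
{
  const size_t FIL_PAGE_SPACE_OR_CHKSUM = 0;  /* assumed header offset       */
  const size_t FIL_PAGE_LSN = 16;             /* assumed header offset       */
  const size_t PAGE_TRAILER = 8;              /* FIL_PAGE_END_LSN_OLD_CHKSUM */

  if (read_be32(page + FIL_PAGE_LSN + 4) !=
      read_be32(page + page_size - PAGE_TRAILER + 4))
    return false;                             /* stored LSN halves differ    */

  uint32_t old_stored = read_be32(page + page_size - PAGE_TRAILER);
  if (old_stored != read_be32(page + FIL_PAGE_LSN) &&
      old_stored != calc_old(page))
    return false;                             /* old-style checksum wrong    */

  uint32_t new_stored = read_be32(page + FIL_PAGE_SPACE_OR_CHKSUM);
  if (new_stored != 0 && new_stored != calc_crc32(page) &&
      new_stored != calc_new(page))
    return false;                             /* neither innodb nor crc32    */

  return true;
}

/* Trivial stand-in calculator, just enough to exercise the function. */
static uint32_t zero_csum(const unsigned char *) { return 0; }

int main()
{
  unsigned char page[16384];
  memset(page, 0, sizeof(page));              /* a freshly allocated page    */
  printf("all-zero page valid: %d\n",
         page_looks_valid(page, sizeof(page), zero_csum, zero_csum, zero_csum));
  return 0;
}
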
diff --git a/extra/replace.c b/extra/replace.c
index c4105e45973..56cf02f2002 100644
--- a/extra/replace.c
+++ b/extra/replace.c
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2000, 2011, Oracle and/or its affiliates
+ Copyright (c) 2000, 2014, Oracle and/or its affiliates
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@@ -1021,7 +1021,7 @@ FILE *in,*out;
updated=retain=0;
reset_buffer();
- while ((error=fill_buffer_retaining(fileno(in),retain)) > 0)
+ while ((error=fill_buffer_retaining(my_fileno(in),retain)) > 0)
{
end_of_line=buffer ;
buffer[bufbytes]=0; /* Sentinel */
diff --git a/extra/yassl/README b/extra/yassl/README
index b18e2baeea8..30c7af4a702 100644
--- a/extra/yassl/README
+++ b/extra/yassl/README
@@ -12,6 +12,15 @@ before calling SSL_new();
*** end Note ***
+yaSSL Release notes, version 2.3.5 (9/29/2014)
+
+ This release of yaSSL fixes an RSA Padding check vulnerability reported by
+ Intel Security Advanced Threat Research team.
+
+See normal build instructions below under 1.0.6.
+See libcurl build instructions below under 1.3.0 and note in 1.5.8.
+
+
yaSSL Release notes, version 2.3.4 (8/15/2014)
This release of yaSSL adds checking to the input_buffer class itself.
diff --git a/extra/yassl/include/openssl/ssl.h b/extra/yassl/include/openssl/ssl.h
index 835a46eaea8..f819d76adc7 100644
--- a/extra/yassl/include/openssl/ssl.h
+++ b/extra/yassl/include/openssl/ssl.h
@@ -34,7 +34,7 @@
#include "rsa.h"
-#define YASSL_VERSION "2.3.4"
+#define YASSL_VERSION "2.3.5"
#if defined(__cplusplus)
diff --git a/extra/yassl/src/yassl_imp.cpp b/extra/yassl/src/yassl_imp.cpp
index e2da042457f..25e00d45d2b 100644
--- a/extra/yassl/src/yassl_imp.cpp
+++ b/extra/yassl/src/yassl_imp.cpp
@@ -24,7 +24,7 @@
#include "handshake.hpp"
#include "asn.hpp" // provide crypto wrapper??
-
+#include <my_attribute.h>
namespace yaSSL {
@@ -947,7 +947,7 @@ void Alert::Process(input_buffer& input, SSL& ssl)
if (ssl.getSecurity().get_parms().cipher_type_ == block) {
int ivExtra = 0;
- opaque fill;
+ opaque fill __attribute__((unused));
if (ssl.isTLSv1_1())
ivExtra = ssl.getCrypto().get_cipher().get_blockSize();
@@ -2395,7 +2395,7 @@ void Finished::Process(input_buffer& input, SSL& ssl)
if (ssl.isTLSv1_1())
ivExtra = ssl.getCrypto().get_cipher().get_blockSize();
- opaque fill;
+ opaque fill __attribute__((unused));
int padSz = ssl.getSecurity().get_parms().encrypt_size_ - ivExtra -
HANDSHAKE_HEADER - finishedSz - digestSz;
for (int i = 0; i < padSz; i++)
diff --git a/extra/yassl/taocrypt/src/rsa.cpp b/extra/yassl/taocrypt/src/rsa.cpp
index 69c59402e48..79a8a8f1c4f 100644
--- a/extra/yassl/taocrypt/src/rsa.cpp
+++ b/extra/yassl/taocrypt/src/rsa.cpp
@@ -177,7 +177,7 @@ word32 RSA_BlockType1::UnPad(const byte* pkcsBlock, word32 pkcsBlockLen,
// skip past the padding until we find the separator
unsigned i=1;
- while (i<pkcsBlockLen && pkcsBlock[i++]) { // null body
+ while (i<pkcsBlockLen && pkcsBlock[i++] == 0xFF) { // null body
}
if (!(i==pkcsBlockLen || pkcsBlock[i-1]==0))
return 0;
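
The tightened loop condition above is the RSA padding fix referenced by the 2.3.5 release note: block type 1 padding must consist of 0xFF bytes only, so the unpad loop now stops at the first non-0xFF byte instead of at the first zero byte. Below is a small, self-contained sketch of a strict type 1 unpad; the block layout (0x00, 0x01, at least eight 0xFF bytes, a 0x00 separator, then the payload) follows PKCS#1 v1.5, but the indexing and the minimum-padding check here are illustrative assumptions, not a copy of the yaSSL routine.

#include <cstddef>
#include <cstdint>
#include <cstdio>

/*
  Strict PKCS#1 v1.5 block type 1 unpad: every padding byte must be 0xFF
  until the 0x00 separator.  Returns the payload length, or 0 on a
  malformed block.
*/
static size_t unpad_type1(const uint8_t *block, size_t len, const uint8_t **out)
{
  if (len < 11 || block[0] != 0x00 || block[1] != 0x01)
    return 0;

  size_t i = 2;
  while (i < len && block[i] == 0xFF)          /* padding must be all 0xFF */
    i++;

  if (i < 10 || i >= len || block[i] != 0x00)  /* >= 8 pad bytes + separator */
    return 0;

  *out = block + i + 1;
  return len - i - 1;
}

int main()
{
  const uint8_t good[] = {0x00,0x01,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,'h','i'};
  const uint8_t bad[]  = {0x00,0x01,0xFF,0xFF,0x7F,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,'h','i'};
  const uint8_t *p;
  printf("good block payload: %zu bytes\n", unpad_type1(good, sizeof(good), &p)); /* 2 */
  printf("bad block payload:  %zu bytes\n", unpad_type1(bad,  sizeof(bad),  &p)); /* 0 */
  return 0;
}
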
diff --git a/include/my_check_opt.h b/include/my_check_opt.h
index a95cb79b3ac..ccf003decab 100644
--- a/include/my_check_opt.h
+++ b/include/my_check_opt.h
@@ -64,9 +64,11 @@ extern "C" {
#define T_ZEROFILL_KEEP_LSN (1ULL << 33)
/** If repair should not bump create_rename_lsn */
#define T_NO_CREATE_RENAME_LSN (1ULL << 34)
-#define T_CREATE_UNIQUE_BY_SORT (1ULL << 35)
-#define T_SUPPRESS_ERR_HANDLING (1ULL << 36)
-#define T_FORCE_SORT_MEMORY (1ULL << 37)
+/** If repair shouldn't do any locks */
+#define T_NO_LOCKS (1ULL << 35)
+#define T_CREATE_UNIQUE_BY_SORT (1ULL << 36)
+#define T_SUPPRESS_ERR_HANDLING (1ULL << 37)
+#define T_FORCE_SORT_MEMORY (1ULL << 38)
#define T_REP_ANY (T_REP | T_REP_BY_SORT | T_REP_PARALLEL)
diff --git a/include/my_pthread.h b/include/my_pthread.h
index 0be821586a1..7770c28f45f 100644
--- a/include/my_pthread.h
+++ b/include/my_pthread.h
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2009, 2013, Monty Program Ab.
+/* Copyright (c) 2000, 2014, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2014, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -104,8 +104,18 @@ int pthread_attr_init(pthread_attr_t *connect_att);
int pthread_attr_setstacksize(pthread_attr_t *connect_att,DWORD stack);
int pthread_attr_destroy(pthread_attr_t *connect_att);
int my_pthread_once(my_pthread_once_t *once_control,void (*init_routine)(void));
-struct tm *localtime_r(const time_t *timep,struct tm *tmp);
-struct tm *gmtime_r(const time_t *timep,struct tm *tmp);
+
+static inline struct tm *localtime_r(const time_t *timep, struct tm *tmp)
+{
+ localtime_s(tmp, timep);
+ return tmp;
+}
+
+static inline struct tm *gmtime_r(const time_t *clock, struct tm *res)
+{
+ gmtime_s(res, clock);
+ return res;
+}
void pthread_exit(void *a);
int pthread_join(pthread_t thread, void **value_ptr);
@@ -721,11 +731,10 @@ struct st_my_thread_var
mysql_cond_t * volatile current_cond;
pthread_t pthread_self;
my_thread_id id;
- int cmp_length;
int volatile abort;
my_bool init;
struct st_my_thread_var *next,**prev;
- void *opt_info;
+ void *keycache_link;
uint lock_type; /* used by conditional release the queue */
void *stack_ends_here;
safe_mutex_t *mutex_in_use;
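
The localtime_r/gmtime_r prototypes replaced above become inline wrappers because Windows ships localtime_s()/gmtime_s(), which take the destination first, while the POSIX _r functions take it last. A minimal usage sketch, assuming either a POSIX libc or a Windows build that sees the wrappers above:

#include <ctime>
#include <cstdio>

int main()
{
  /* Same call shape on POSIX and, via the wrappers above, on Windows. */
  time_t now = time(NULL);
  struct tm local_tm, utc_tm;
  localtime_r(&now, &local_tm);
  gmtime_r(&now, &utc_tm);
  printf("local hour %d, UTC hour %d\n", local_tm.tm_hour, utc_tm.tm_hour);
  return 0;
}
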
diff --git a/include/mysql.h b/include/mysql.h
index 025bd397a65..63e86937063 100644
--- a/include/mysql.h
+++ b/include/mysql.h
@@ -75,6 +75,7 @@ typedef int my_socket;
#include "my_list.h" /* for LISTs used in 'MYSQL' and 'MYSQL_STMT' */
+extern unsigned int mariadb_deinitialize_ssl;
extern unsigned int mysql_port;
extern char *mysql_unix_port;
diff --git a/include/mysql.h.pp b/include/mysql.h.pp
index 6b60389acc3..dd794e856e1 100644
--- a/include/mysql.h.pp
+++ b/include/mysql.h.pp
@@ -172,6 +172,7 @@ extern LIST *list_reverse(LIST *root);
extern void list_free(LIST *root,unsigned int free_data);
extern unsigned int list_length(LIST *);
extern int list_walk(LIST *,list_walk_action action,unsigned char * argument);
+extern unsigned int mariadb_deinitialize_ssl;
extern unsigned int mysql_port;
extern char *mysql_unix_port;
typedef struct st_mysql_field {
diff --git a/libmysql/CMakeLists.txt b/libmysql/CMakeLists.txt
index d432d499d47..bb85b47f863 100644
--- a/libmysql/CMakeLists.txt
+++ b/libmysql/CMakeLists.txt
@@ -257,7 +257,8 @@ mariadb_dyncol_unpack_free
mariadb_dyncol_column_cmp_named
mariadb_dyncol_column_count
mariadb_dyncol_prepare_decimal
-
+#
+mariadb_deinitialize_ssl
# Added in MariaDB-10.0 to stay compatible with MySQL-5.6, yuck!
mysql_options4
)
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index 554c3fc0ab6..a0bfe4e0d4f 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -205,7 +205,8 @@ void STDCALL mysql_server_end()
mysql_client_plugin_deinit();
finish_client_errs();
- vio_end();
+ if (mariadb_deinitialize_ssl)
+ vio_end();
#ifdef EMBEDDED_LIBRARY
end_embedded_server();
#endif
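
The guard above means an application can keep the client library from tearing down SSL state on shutdown by clearing the new flag first. A sketch of that call pattern follows; the flag name comes straight from these hunks, but its default value and whether skipping vio_end() is appropriate depend on how the host application manages SSL, so treat this as an illustration rather than documented API.

#include <mysql.h>
#include <cstdio>

int main()
{
  if (mysql_library_init(0, NULL, NULL))
  {
    fprintf(stderr, "could not init client library\n");
    return 1;
  }

  /* ... connections, queries ... */

  /* The host application owns the SSL library; ask the client library
     not to run its SSL teardown (vio_end) when it shuts down. */
  mariadb_deinitialize_ssl = 0;
  mysql_library_end();
  return 0;
}
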
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index 9024ae9321e..a5fef8ecdd0 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -2,6 +2,9 @@
* Copyright (c) 2000
* SWsoft company
*
+ * Modifications copyright (c) 2001, 2013. Oracle and/or its affiliates.
+ * All rights reserved.
+ *
* This material is provided "as is", with absolutely no warranty expressed
* or implied. Any use is at your own risk.
*
diff --git a/mysql-test/include/ctype_like_cond_propagation.inc b/mysql-test/include/ctype_like_cond_propagation.inc
new file mode 100644
index 00000000000..023cf2b1b30
--- /dev/null
+++ b/mysql-test/include/ctype_like_cond_propagation.inc
@@ -0,0 +1,39 @@
+--echo #
+--echo # MDEV-7149 Constant condition propagation erroneously applied for LIKE
+--echo #
+
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+DROP TABLE t1;
+
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+DROP TABLE t1;
+
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+DROP TABLE t1;
+
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+DROP TABLE t1;
diff --git a/mysql-test/include/ctype_like_cond_propagation_utf8_german.inc b/mysql-test/include/ctype_like_cond_propagation_utf8_german.inc
new file mode 100644
index 00000000000..2cfa89448bc
--- /dev/null
+++ b/mysql-test/include/ctype_like_cond_propagation_utf8_german.inc
@@ -0,0 +1,16 @@
+--echo #
+--echo # MDEV-7149 Constant condition propagation erroneously applied for LIKE
+--echo #
+
+CREATE TABLE t1 AS SELECT REPEAT('a',10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES ('ae'),('ä');
+SELECT * FROM t1 WHERE c1='ä';
+SELECT * FROM t1 WHERE c1 LIKE 'ae';
+SELECT * FROM t1 WHERE c1='ä' AND c1 LIKE 'ae';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE c1='ä' AND c1 LIKE 'ae';
+SELECT * FROM t1 WHERE CONCAT(c1)='ä';
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'ae';
+SELECT * FROM t1 WHERE CONCAT(c1)='ä' AND CONCAT(c1) LIKE 'ae';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='ä' AND CONCAT(c1) LIKE 'ae';
+DROP TABLE IF EXISTS t1;
diff --git a/mysql-test/include/not_embedded.inc b/mysql-test/include/not_embedded.inc
index c3a9c3e12c8..88185af3b15 100644
--- a/mysql-test/include/not_embedded.inc
+++ b/mysql-test/include/not_embedded.inc
@@ -4,8 +4,7 @@
#
# The test below is redundant
--- require r/not_embedded.require
-disable_query_log;
-select version() like '%embedded%' as 'have_embedded';
-enable_query_log;
+if (`select version() like '%embedded%'`) {
+ This should never happen;
+}
diff --git a/mysql-test/include/restart_mysqld.inc b/mysql-test/include/restart_mysqld.inc
index 49f6dfd5364..3d53fada870 100644
--- a/mysql-test/include/restart_mysqld.inc
+++ b/mysql-test/include/restart_mysqld.inc
@@ -1,3 +1,4 @@
+--source include/not_embedded.inc
if ($rpl_inited)
{
diff --git a/mysql-test/lib/My/SafeProcess/Base.pm b/mysql-test/lib/My/SafeProcess/Base.pm
index 0e8c191c440..1ac0120a735 100644
--- a/mysql-test/lib/My/SafeProcess/Base.pm
+++ b/mysql-test/lib/My/SafeProcess/Base.pm
@@ -180,6 +180,7 @@ sub create_process {
}
$SIG{INT}= 'DEFAULT';
+ $SIG{HUP}= 'DEFAULT';
# Make this process it's own process group to be able to kill
# it and any childs(that hasn't changed group themself)
diff --git a/mysql-test/lib/My/SafeProcess/safe_process.cc b/mysql-test/lib/My/SafeProcess/safe_process.cc
index f19ca622278..d6110f5f8c8 100644
--- a/mysql-test/lib/My/SafeProcess/safe_process.cc
+++ b/mysql-test/lib/My/SafeProcess/safe_process.cc
@@ -141,6 +141,7 @@ extern "C" void handle_signal(int sig)
// Ignore further signals
signal(SIGTERM, SIG_IGN);
signal(SIGINT, SIG_IGN);
+ signal(SIGHUP, SIG_IGN);
 // Continue execution, allow the child to be started and
// finally terminated by monitor loop
@@ -164,6 +165,7 @@ int main(int argc, char* const argv[] )
/* Install signal handlers */
sigaction(SIGTERM, &sa,NULL);
sigaction(SIGINT, &sa,NULL);
+ sigaction(SIGHUP, &sa, NULL);
sigaction(SIGCHLD, &sa,NULL);
sigaction(SIGABRT, &sa_abort,NULL);
@@ -231,6 +233,7 @@ int main(int argc, char* const argv[] )
// Use default signal handlers in child
signal(SIGTERM, SIG_DFL);
signal(SIGINT, SIG_DFL);
+ signal(SIGHUP, SIG_DFL);
signal(SIGCHLD, SIG_DFL);
// Make this process it's own process group to be able to kill
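
The three safe_process.cc hunks extend the existing TERM/INT handling to SIGHUP and keep its shape: one handler installed in the monitor, the handler made one-shot by ignoring further signals, and default dispositions restored in the forked child before exec. A compressed, POSIX-only sketch of that shape, with the monitor loop reduced to a single waitpid():

#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

static volatile sig_atomic_t terminated = 0;

extern "C" void handle_signal(int sig)
{
  terminated = sig;
  /* one-shot: ignore further signals, as the real handler does */
  signal(SIGTERM, SIG_IGN);
  signal(SIGINT, SIG_IGN);
  signal(SIGHUP, SIG_IGN);
}

int main()
{
  struct sigaction sa;
  sa.sa_handler = handle_signal;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sigaction(SIGTERM, &sa, NULL);
  sigaction(SIGINT,  &sa, NULL);
  sigaction(SIGHUP,  &sa, NULL);

  pid_t child = fork();
  if (child == 0)
  {
    /* child: back to default dispositions before exec, own process group */
    signal(SIGTERM, SIG_DFL);
    signal(SIGINT,  SIG_DFL);
    signal(SIGHUP,  SIG_DFL);
    setpgid(0, 0);
    execlp("sleep", "sleep", "1", (char *) NULL);
    _exit(127);
  }

  int status;
  waitpid(child, &status, 0);   /* the real monitor also terminates the
                                   child's process group when signalled */
  printf("child done, signal flag = %d\n", (int) terminated);
  return 0;
}
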
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 4d9d854cc47..6bb6e5a3c79 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -109,6 +109,7 @@ require "lib/mtr_gprof.pl";
require "lib/mtr_misc.pl";
$SIG{INT}= sub { mtr_error("Got ^C signal"); };
+$SIG{HUP}= sub { mtr_error("Hangup detected on controlling terminal"); };
our $mysql_version_id;
my $mysql_version_extra;
@@ -262,6 +263,7 @@ our $opt_ddd;
our $opt_client_ddd;
my $opt_boot_ddd;
our $opt_manual_gdb;
+our $opt_manual_lldb;
our $opt_manual_dbx;
our $opt_manual_ddd;
our $opt_manual_debug;
@@ -923,6 +925,7 @@ sub run_worker ($) {
my ($server_port, $thread_num)= @_;
$SIG{INT}= sub { exit(1); };
+ $SIG{HUP}= sub { exit(1); };
# Connect to server
my $server = new IO::Socket::INET
@@ -1172,6 +1175,7 @@ sub command_line_setup {
'gdb' => \$opt_gdb,
'client-gdb' => \$opt_client_gdb,
'manual-gdb' => \$opt_manual_gdb,
+ 'manual-lldb' => \$opt_manual_lldb,
'boot-gdb' => \$opt_boot_gdb,
'manual-debug' => \$opt_manual_debug,
'ddd' => \$opt_ddd,
@@ -1502,6 +1506,7 @@ sub command_line_setup {
{
$default_vardir= "$glob_mysql_test_dir/var";
}
+ $default_vardir = realpath $default_vardir unless IS_WINDOWS;
if ( ! $opt_vardir )
{
@@ -1608,8 +1613,9 @@ sub command_line_setup {
$opt_debugger= undef;
}
- if ( $opt_gdb || $opt_ddd || $opt_manual_gdb || $opt_manual_ddd ||
- $opt_manual_debug || $opt_debugger || $opt_dbx || $opt_manual_dbx)
+ if ( $opt_gdb || $opt_ddd || $opt_manual_gdb || $opt_manual_lldb ||
+ $opt_manual_ddd || $opt_manual_debug || $opt_debugger || $opt_dbx ||
+ $opt_manual_dbx)
{
mtr_error("You need to use the client debug options for the",
"embedded server. Ex: --client-gdb");
@@ -1636,9 +1642,9 @@ sub command_line_setup {
# --------------------------------------------------------------------------
# Check debug related options
# --------------------------------------------------------------------------
- if ( $opt_gdb || $opt_client_gdb || $opt_ddd || $opt_client_ddd ||
- $opt_manual_gdb || $opt_manual_ddd || $opt_manual_debug ||
- $opt_dbx || $opt_client_dbx || $opt_manual_dbx ||
+ if ( $opt_gdb || $opt_client_gdb || $opt_ddd || $opt_client_ddd ||
+ $opt_manual_gdb || $opt_manual_lldb || $opt_manual_ddd ||
+ $opt_manual_debug || $opt_dbx || $opt_client_dbx || $opt_manual_dbx ||
$opt_debugger || $opt_client_debugger )
{
if ( using_extern() )
@@ -2495,6 +2501,26 @@ sub environment_setup {
"$bindir/sql$opt_vs_config/mysql_tzinfo_to_sql");
$ENV{'MYSQL_TZINFO_TO_SQL'}= native_path($exe_mysql_tzinfo_to_sql);
+ # ----------------------------------------------------
+ # replace
+ # ----------------------------------------------------
+ my $exe_replace= mtr_exe_exists(vs_config_dirs('extra', 'replace'),
+ "$basedir/extra/replace",
+ "$bindir/extra$opt_vs_config/replace",
+ "$path_client_bindir/replace");
+ $ENV{'REPLACE'}= native_path($exe_replace);
+
+ # ----------------------------------------------------
+ # innochecksum
+ # ----------------------------------------------------
+ my $exe_innochecksum=
+ mtr_exe_maybe_exists("$bindir/extra$opt_vs_config/innochecksum",
+ "$path_client_bindir/innochecksum");
+ if ($exe_innochecksum)
+ {
+ $ENV{'INNOCHECKSUM'}= native_path($exe_innochecksum);
+ }
+
# Create an environment variable to make it possible
# to detect that valgrind is being used from test cases
$ENV{'VALGRIND_TEST'}= $opt_valgrind;
@@ -5455,6 +5481,10 @@ sub mysqld_start ($$) {
{
gdb_arguments(\$args, \$exe, $mysqld->name());
}
+ elsif ( $opt_manual_lldb )
+ {
+ lldb_arguments(\$args, \$exe, $mysqld->name());
+ }
elsif ( $opt_ddd || $opt_manual_ddd )
{
ddd_arguments(\$args, \$exe, $mysqld->name());
@@ -6001,7 +6031,6 @@ sub start_mysqltest ($) {
return $proc;
}
-
#
# Modify the exe and args so that program is run in gdb in xterm
#
@@ -6052,6 +6081,32 @@ sub gdb_arguments {
$$exe= "xterm";
}
+#
+# Modify the exe and args so that program is run in lldb
+#
+sub lldb_arguments {
+ my $args= shift;
+ my $exe= shift;
+ my $type= shift;
+ my $input= shift;
+
+ my $lldb_init_file= "$opt_vardir/tmp/lldbinit.$type";
+ unlink($lldb_init_file);
+
+ # Put $args into a single string
+ my $str= join(" ", @$$args);
+ $input = $input ? "< $input" : "";
+
+ # write init file for mysqld or client
+ mtr_tofile($lldb_init_file, "set args $str $input\n");
+
+ print "\nTo start lldb for $type, type in another window:\n";
+ print "cd $glob_mysql_test_dir && lldb -s $lldb_init_file $$exe\n";
+
+ # Indicate the exe should not be started
+ $$exe= undef;
+ return;
+}
#
# Modify the exe and args so that program is run in ddd
@@ -6180,7 +6235,6 @@ sub debugger_arguments {
}
}
-
#
# Modify the exe and args so that program is run in valgrind
#
@@ -6202,10 +6256,14 @@ sub valgrind_arguments {
if -f "$glob_mysql_test_dir/valgrind.supp";
# Ensure the jemalloc works with mysqld
- if ($mysqld_variables{'version-malloc-library'} ne "system" &&
- $$exe =~ /mysqld/)
+ if ($$exe =~ /mysqld/)
{
- mtr_add_arg($args, "--soname-synonyms=somalloc=NONE" );
+ my %somalloc=(
+ 'system jemalloc' => 'libjemalloc*',
+ 'bundled jemalloc' => 'NONE'
+ );
+ my ($syn) = $somalloc{$mysqld_variables{'version-malloc-library'}};
+ mtr_add_arg($args, '--soname-synonyms=somalloc=%s', $syn) if $syn;
}
}
@@ -6483,6 +6541,8 @@ Options for debugging the product
test(s)
manual-dbx Let user manually start mysqld in dbx, before running
test(s)
+ manual-lldb Let user manually start mysqld in lldb, before running
+ test(s)
max-save-core Limit the number of core files saved (to avoid filling
up disks for heavily crashing server). Defaults to
$opt_max_save_core, set to 0 for no limit. Set
diff --git a/mysql-test/r/change_user_notembedded.result b/mysql-test/r/change_user_notembedded.result
index 60579d15ec2..896a6045481 100644
--- a/mysql-test/r/change_user_notembedded.result
+++ b/mysql-test/r/change_user_notembedded.result
@@ -3,3 +3,4 @@ ERROR 28000: Access denied for user 'foo'@'localhost' (using password: NO)
ERROR 28000: Access denied for user 'foo'@'localhost' (using password: YES)
ERROR 08S01: Unknown command
ERROR 08S01: Unknown command
+that's all
diff --git a/mysql-test/r/create_or_replace.result b/mysql-test/r/create_or_replace.result
index a2f06c38cb5..ff8170b7309 100644
--- a/mysql-test/r/create_or_replace.result
+++ b/mysql-test/r/create_or_replace.result
@@ -436,7 +436,9 @@ CREATE OR REPLACE TEMPORARY TABLE tmp LIKE t1;
LOCK TABLE t1 WRITE;
CREATE OR REPLACE TABLE t1 LIKE tmp;
KILL QUERY con_id;
+ERROR 70100: Query execution was interrupted
CREATE OR REPLACE TABLE t1 (a int);
KILL QUERY con_id;
+ERROR 70100: Query execution was interrupted
drop table t1;
DROP TABLE t2;
diff --git a/mysql-test/r/ctype_binary.result b/mysql-test/r/ctype_binary.result
index 550c47ccdaa..f35e6fd8265 100644
--- a/mysql-test/r/ctype_binary.result
+++ b/mysql-test/r/ctype_binary.result
@@ -2915,3 +2915,98 @@ SET sql_mode=default;
#
# End of 5.5 tests
#
+#
+# Start of 10.0 tests
+#
+SET NAMES binary;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varbinary(10) NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+c1
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where 0
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varbinary(10) NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+c1
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where 0
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varbinary(10) NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where 0
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varbinary(10) NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ('%' = concat(`test`.`t1`.`c1`))
+DROP TABLE t1;
+#
+# End of 10.0 tests
+#
diff --git a/mysql-test/r/ctype_latin1.result b/mysql-test/r/ctype_latin1.result
index fac9824401f..970c067b25c 100644
--- a/mysql-test/r/ctype_latin1.result
+++ b/mysql-test/r/ctype_latin1.result
@@ -7659,6 +7659,198 @@ DROP FUNCTION mysql_real_escape_string_generated;
DROP FUNCTION iswellformed;
DROP TABLE allbytes;
# End of ctype_backslash.inc
+SET NAMES latin1;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+c1
+a
+a
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'a') and (concat(`test`.`t1`.`c1`) like 'a '))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+c1
+a
+a
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('a' = concat(`test`.`t1`.`c1`)) and ('a ' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('% ' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('%' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+SET NAMES latin1 COLLATE latin1_bin;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+c1
+a
+a
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'a') and (concat(`test`.`t1`.`c1`) like 'a '))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+c1
+a
+a
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('a' = concat(`test`.`t1`.`c1`)) and ('a ' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('% ' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('%' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
#
# MDEV-6752 Trailing incomplete characters are not replaced to question marks on conversion
#
diff --git a/mysql-test/r/ctype_uca.result b/mysql-test/r/ctype_uca.result
index c947689ef81..16b60aed07f 100644
--- a/mysql-test/r/ctype_uca.result
+++ b/mysql-test/r/ctype_uca.result
@@ -7700,6 +7700,276 @@ DROP TABLE t1;
# Start of MariaDB-10.0 tests
#
+SET NAMES utf8 COLLATE utf8_unicode_ci;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+c1
+a
+a
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'a') and (concat(`test`.`t1`.`c1`) like 'a '))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+c1
+a
+a
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('a' = concat(`test`.`t1`.`c1`)) and ('a ' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('% ' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('%' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a',10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('ae'),('ä');
+SELECT * FROM t1 WHERE c1='ä';
+c1
+ä
+SELECT * FROM t1 WHERE c1 LIKE 'ae';
+c1
+ae
+SELECT * FROM t1 WHERE c1='ä' AND c1 LIKE 'ae';
+c1
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE c1='ä' AND c1 LIKE 'ae';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((`test`.`t1`.`c1` = 'ä') and (`test`.`t1`.`c1` like 'ae'))
+SELECT * FROM t1 WHERE CONCAT(c1)='ä';
+c1
+ä
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'ae';
+c1
+ae
+SELECT * FROM t1 WHERE CONCAT(c1)='ä' AND CONCAT(c1) LIKE 'ae';
+c1
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='ä' AND CONCAT(c1) LIKE 'ae';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'ä') and (concat(`test`.`t1`.`c1`) like 'ae'))
+DROP TABLE IF EXISTS t1;
+SET NAMES utf8 COLLATE utf8_german2_ci;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_german2_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+c1
+a
+a
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'a') and (concat(`test`.`t1`.`c1`) like 'a '))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_german2_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+c1
+a
+a
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('a' = concat(`test`.`t1`.`c1`)) and ('a ' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_german2_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('% ' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_german2_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('%' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a',10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_german2_ci NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('ae'),('ä');
+SELECT * FROM t1 WHERE c1='ä';
+c1
+ae
+ä
+SELECT * FROM t1 WHERE c1 LIKE 'ae';
+c1
+ae
+SELECT * FROM t1 WHERE c1='ä' AND c1 LIKE 'ae';
+c1
+ae
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE c1='ä' AND c1 LIKE 'ae';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((`test`.`t1`.`c1` = 'ä') and (`test`.`t1`.`c1` like 'ae'))
+SELECT * FROM t1 WHERE CONCAT(c1)='ä';
+c1
+ae
+ä
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'ae';
+c1
+ae
+SELECT * FROM t1 WHERE CONCAT(c1)='ä' AND CONCAT(c1) LIKE 'ae';
+c1
+ae
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='ä' AND CONCAT(c1) LIKE 'ae';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'ä') and (concat(`test`.`t1`.`c1`) like 'ae'))
+DROP TABLE IF EXISTS t1;
#
# MDEV-4929 Myanmar collation
#
@@ -12856,5 +13126,38 @@ DROP TABLE t1;
# END of ctype_myanmar.inc
#
#
+# MDEV-7366 SELECT 'a' = BINARY 'A' returns 1 (utf8 charset, utf8_unicode_ci collation)
+#
+SET NAMES utf8 COLLATE utf8_unicode_ci;
+SELECT 'a' = BINARY 'A';
+'a' = BINARY 'A'
+0
+SELECT BINARY 'A' = 'a';
+BINARY 'A' = 'a'
+0
+#
+# Wrong result set for WHERE a='oe' COLLATE utf8_german2_ci AND a='oe'
+#
+SET NAMES utf8 COLLATE utf8_german2_ci;
+CREATE TABLE t1 (a CHAR(10) CHARACTER SET utf8);
+INSERT INTO t1 VALUES ('ö'),('oe');
+SELECT * FROM t1 WHERE a='oe' AND a='oe' COLLATE utf8_german2_ci;
+a
+oe
+SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci AND a='oe';
+a
+oe
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='oe' AND a='oe' COLLATE utf8_german2_ci;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where ((`test`.`t1`.`a` = 'oe') and (`test`.`t1`.`a` = 'oe'))
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci AND a='oe';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where ((`test`.`t1`.`a` = 'oe') and (`test`.`t1`.`a` = 'oe'))
+DROP TABLE t1;
+#
# End of MariaDB-10.0 tests
#
diff --git a/mysql-test/r/ctype_ucs.result b/mysql-test/r/ctype_ucs.result
index 3cfc076b8a0..68435f3f41c 100644
--- a/mysql-test/r/ctype_ucs.result
+++ b/mysql-test/r/ctype_ucs.result
@@ -5326,6 +5326,199 @@ DROP TABLE t1;
#
# Start of 10.0 tests
#
+SET NAMES latin1, collation_connection=ucs2_bin;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET ucs2 COLLATE ucs2_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+c1
+a
+a
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'a') and (concat(`test`.`t1`.`c1`) like 'a '))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET ucs2 COLLATE ucs2_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+c1
+a
+a
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('a' = concat(`test`.`t1`.`c1`)) and ('a ' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET ucs2 COLLATE ucs2_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('% ' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET ucs2 COLLATE ucs2_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('%' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+SET NAMES latin1, collation_connection=ucs2_general_ci;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET ucs2 NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+c1
+a
+a
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'a') and (concat(`test`.`t1`.`c1`) like 'a '))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET ucs2 NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+c1
+a
+a
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('a' = concat(`test`.`t1`.`c1`)) and ('a ' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET ucs2 NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('% ' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET ucs2 NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('%' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+SET NAMES latin1;
#
# MDEV-6661 PI() does not work well in UCS2/UTF16/UTF32 context
#
diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result
index 43f3aa4b6c9..c752024ec7f 100644
--- a/mysql-test/r/ctype_utf8.result
+++ b/mysql-test/r/ctype_utf8.result
@@ -5936,6 +5936,235 @@ set max_sort_length=default;
#
# Start of 10.0 tests
#
+SET NAMES utf8 COLLATE utf8_bin;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+c1
+a
+a
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'a') and (concat(`test`.`t1`.`c1`) like 'a '))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+c1
+a
+a
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('a' = concat(`test`.`t1`.`c1`)) and ('a ' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('% ' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('%' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+SET NAMES utf8;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE CONCAT(c1)='a';
+c1
+a
+a
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'a ';
+c1
+a
+SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='a' AND CONCAT(c1) LIKE 'a ';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'a') and (concat(`test`.`t1`.`c1`) like 'a '))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('a'),('a ');
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1);
+c1
+a
+a
+SELECT * FROM t1 WHERE 'a ' LIKE CONCAT(c1);
+c1
+a
+SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+c1
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 'a'=CONCAT(c1) AND 'a ' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('a' = concat(`test`.`t1`.`c1`)) and ('a ' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '% '=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '% '=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('% ' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT REPEAT('a', 10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('%'),('% ');
+SELECT * FROM t1 WHERE '%'=CONCAT(c1);
+c1
+%
+%
+SELECT * FROM t1 WHERE 'a' LIKE CONCAT(c1);
+c1
+%
+SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+c1
+%
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE '%'=CONCAT(c1) AND 'a' LIKE CONCAT(c1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where (('%' = concat(`test`.`t1`.`c1`)) and ('a' like concat(`test`.`t1`.`c1`)))
+DROP TABLE t1;
+#
+# MDEV-7149 Constant condition propagation erroneously applied for LIKE
+#
+CREATE TABLE t1 AS SELECT REPEAT('a',10) AS c1 LIMIT 0;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(10) CHARACTER SET utf8 NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES ('ae'),('ä');
+SELECT * FROM t1 WHERE c1='ä';
+c1
+ä
+SELECT * FROM t1 WHERE c1 LIKE 'ae';
+c1
+ae
+SELECT * FROM t1 WHERE c1='ä' AND c1 LIKE 'ae';
+c1
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE c1='ä' AND c1 LIKE 'ae';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((`test`.`t1`.`c1` = 'ä') and (`test`.`t1`.`c1` like 'ae'))
+SELECT * FROM t1 WHERE CONCAT(c1)='ä';
+c1
+ä
+SELECT * FROM t1 WHERE CONCAT(c1) LIKE 'ae';
+c1
+ae
+SELECT * FROM t1 WHERE CONCAT(c1)='ä' AND CONCAT(c1) LIKE 'ae';
+c1
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE CONCAT(c1)='ä' AND CONCAT(c1) LIKE 'ae';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` where ((concat(`test`.`t1`.`c1`) = 'ä') and (concat(`test`.`t1`.`c1`) like 'ae'))
+DROP TABLE IF EXISTS t1;
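(Illustrative aside, not part of the recorded patch.) The MDEV-7149 cases above hinge on two comparison facts: with PAD SPACE collations '=' ignores trailing spaces, while LIKE compares them literally, and UCA-based collations can make visually different strings compare equal. A constant learned from an equality therefore must not be substituted into a LIKE argument. A minimal sketch, assuming the default latin1 PAD SPACE collation:

SELECT 'a' = 'a ';     # returns 1: '=' ignores the trailing space
SELECT 'a' LIKE 'a ';  # returns 0: LIKE matches the trailing space literally
# so rewriting  CONCAT(c1) LIKE 'a '  into  'a' LIKE 'a '  after seeing
# CONCAT(c1) = 'a' would flip the result, which is what the fix forbids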
#
# MDEV-6666 Malformed result for CONCAT(utf8_column, binary_string)
#
diff --git a/mysql-test/r/frm_bad_row_type-7333.result b/mysql-test/r/frm_bad_row_type-7333.result
new file mode 100644
index 00000000000..4df3b70fc9f
--- /dev/null
+++ b/mysql-test/r/frm_bad_row_type-7333.result
@@ -0,0 +1,14 @@
+call mtr.add_suppression("bad_row_type.frm: invalid value 11 for the field row_format");
+select * from bad_row_type;
+category_id category_name
+show create table bad_row_type;
+Table Create Table
+bad_row_type CREATE TABLE `bad_row_type` (
+ `category_id` int(11) NOT NULL AUTO_INCREMENT,
+ `category_name` varchar(255) DEFAULT NULL,
+ PRIMARY KEY (`category_id`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 /* `compression`='tokudb_zlib' */
+show table status like 'bad_row_type';
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+bad_row_type MyISAM 10 Dynamic 0 0 0 281474976710655 1024 0 1 x x NULL utf8_general_ci NULL `compression`='tokudb_zlib'
+drop table bad_row_type;
diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result
index bf07595bc3a..950d2e72666 100644
--- a/mysql-test/r/func_time.result
+++ b/mysql-test/r/func_time.result
@@ -2677,3 +2677,27 @@ Warning 1292 Truncated incorrect time value: '9336:00:00'
Warning 1292 Truncated incorrect time value: '2952:00:00'
Warning 1292 Truncated incorrect time value: '2952:00:00'
DROP TABLE t1;
+#
+# MDEV-7221 from_days fails after null value
+#
+CREATE TABLE t1 (
+id INT(11) NOT NULL PRIMARY KEY,
+date1 DATE NULL DEFAULT NULL
+);
+INSERT INTO t1 VALUES (12, '2011-05-12');
+INSERT INTO t1 VALUES (13, NULL);
+INSERT INTO t1 VALUES (14, '2009-10-23');
+INSERT INTO t1 VALUES (15, '2014-10-30');
+INSERT INTO t1 VALUES (16, NULL);
+INSERT INTO t1 VALUES (17, NULL);
+INSERT INTO t1 VALUES (18, '2010-10-13');
+SELECT a.id,a.date1,FROM_DAYS(TO_DAYS(a.date1)-10) as date2, DATE_ADD(a.date1,INTERVAL -10 DAY),TO_DAYS(a.date1)-10 FROM t1 a ORDER BY a.id;
+id date1 date2 DATE_ADD(a.date1,INTERVAL -10 DAY) TO_DAYS(a.date1)-10
+12 2011-05-12 2011-05-02 2011-05-02 734624
+13 NULL NULL NULL NULL
+14 2009-10-23 2009-10-13 2009-10-13 734058
+15 2014-10-30 2014-10-20 2014-10-20 735891
+16 NULL NULL NULL NULL
+17 NULL NULL NULL NULL
+18 2010-10-13 2010-10-03 2010-10-03 734413
+DROP TABLE t1;
diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result
index 0858198694b..9a259b3c4c6 100644
--- a/mysql-test/r/group_by.result
+++ b/mysql-test/r/group_by.result
@@ -2494,6 +2494,20 @@ WHERE t1a.c1 = c2 GROUP BY i2;
i2
DROP TABLE t1,t2;
#
+# MDEV-6855
+# MIN(*) with subqueries with IS NOT NULL in WHERE clause crashed.
+#
+CREATE TABLE t1 (i INT, c VARCHAR(3), KEY(c,i)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (7,'foo'),(0,'bar');
+CREATE TABLE t2 (j INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (0),(8),(1),(8),(9);
+SELECT MAX(i), c FROM t1
+WHERE c != 'qux' AND ( SELECT SUM(j) FROM t1, t2 ) IS NOT NULL GROUP BY c;
+MAX(i) c
+0 bar
+7 foo
+drop table t1,t2;
+#
# Bug #58782
# Missing rows with SELECT .. WHERE .. IN subquery
# with full GROUP BY and no aggr
diff --git a/mysql-test/r/group_by_innodb.result b/mysql-test/r/group_by_innodb.result
index 4b5d9990c51..1098579a82d 100644
--- a/mysql-test/r/group_by_innodb.result
+++ b/mysql-test/r/group_by_innodb.result
@@ -57,3 +57,26 @@ i GROUP_CONCAT( d1, d2 ORDER BY d1, d2 )
NULL 11.1,22.2
DROP TABLE t1;
End of 5.5 tests
+#
+# MDEV-5719: Wrong result with GROUP BY and LEFT OUTER JOIN
+#
+CREATE TABLE t1 (oidGroup INT, oid INT PRIMARY KEY)ENGINE=INNODB;
+INSERT INTO t1 VALUES (1,1),(1,2),(1,3),(1,4);
+CREATE TABLE t2 (oid INT PRIMARY KEY)ENGINE=INNODB;
+INSERT INTO t2 VALUES (3);
+SELECT a.oidGroup, a.oid, b.oid FROM t1 a LEFT JOIN t2 b ON
+a.oid=b.oid WHERE a.oidGroup=1;
+oidGroup oid oid
+1 1 NULL
+1 2 NULL
+1 3 3
+1 4 NULL
+SELECT a.oidGroup, a.oid, b.oid FROM t1 a LEFT JOIN t2 b ON
+a.oid=b.oid WHERE a.oidGroup=1 GROUP BY a.oid;
+oidGroup oid oid
+1 1 NULL
+1 2 NULL
+1 3 3
+1 4 NULL
+DROP TABLE t1, t2;
+# End of tests
diff --git a/mysql-test/r/insert_update_autoinc-7150.result b/mysql-test/r/insert_update_autoinc-7150.result
new file mode 100644
index 00000000000..96773479310
--- /dev/null
+++ b/mysql-test/r/insert_update_autoinc-7150.result
@@ -0,0 +1,9 @@
+create table t1 (a int(10) auto_increment primary key, b int(11));
+insert t1 values (null,1);
+insert t1 values (null,2), (1,-1), (null,3) on duplicate key update b=values(b);
+select * from t1;
+a b
+1 -1
+2 2
+3 3
+drop table t1;
diff --git a/mysql-test/r/key_cache.result b/mysql-test/r/key_cache.result
index 8634beb290f..1146ae8bbfa 100644
--- a/mysql-test/r/key_cache.result
+++ b/mysql-test/r/key_cache.result
@@ -435,16 +435,16 @@ p i a
4 3 zzzz
update t1 set p=3 where p=1;
update t2 set i=2 where i=1;
-show status like 'key_%';
-Variable_name Value
-Key_blocks_not_flushed 0
-Key_blocks_unused KEY_BLOCKS_UNUSED
-Key_blocks_used 4
-Key_blocks_warm 0
-Key_read_requests 22
-Key_reads 0
-Key_write_requests 26
-Key_writes 6
+select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
+VARIABLE_NAME VARIABLE_VALUE
+KEY_BLOCKS_NOT_FLUSHED 0
+KEY_BLOCKS_USED 4
+KEY_BLOCKS_WARM 0
+KEY_READ_REQUESTS 22
+KEY_READS 0
+KEY_WRITE_REQUESTS 26
+KEY_WRITES 6
+select variable_value into @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
default NULL NULL 2097152 1024 4 # 0 22 0 26 6
@@ -483,16 +483,18 @@ p i a
4 3 zzzz
update t1 set p=3 where p=1;
update t2 set i=2 where i=1;
-show status like 'key_%';
-Variable_name Value
-Key_blocks_not_flushed 0
-Key_blocks_unused KEY_BLOCKS_UNUSED
-Key_blocks_used 4
-Key_blocks_warm 0
-Key_read_requests 22
-Key_reads 0
-Key_write_requests 26
-Key_writes 6
+select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
+VARIABLE_NAME VARIABLE_VALUE
+KEY_BLOCKS_NOT_FLUSHED 0
+KEY_BLOCKS_USED 4
+KEY_BLOCKS_WARM 0
+KEY_READ_REQUESTS 22
+KEY_READS 0
+KEY_WRITE_REQUESTS 26
+KEY_WRITES 6
+select variable_value < @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
+variable_value < @key_blocks_unused
+1
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
default 2 NULL 2097152 1024 4 # 0 22 0 26 6
@@ -526,16 +528,18 @@ p i a
4 3 zzzz
update t1 set p=3 where p=1;
update t2 set i=2 where i=1;
-show status like 'key_%';
-Variable_name Value
-Key_blocks_not_flushed 0
-Key_blocks_unused KEY_BLOCKS_UNUSED
-Key_blocks_used 4
-Key_blocks_warm 0
-Key_read_requests 22
-Key_reads 0
-Key_write_requests 26
-Key_writes 6
+select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
+VARIABLE_NAME VARIABLE_VALUE
+KEY_BLOCKS_NOT_FLUSHED 0
+KEY_BLOCKS_USED 4
+KEY_BLOCKS_WARM 0
+KEY_READ_REQUESTS 22
+KEY_READS 0
+KEY_WRITE_REQUESTS 26
+KEY_WRITES 6
+select variable_value = @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
+variable_value = @key_blocks_unused
+1
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
default 1 NULL 2097152 1024 4 # 0 22 0 26 6
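(Illustrative aside, not part of the patch.) The key_cache.result change above replaces SHOW STATUS, whose Key_blocks_unused value was not stable and had to be masked in the old expected output, with explicit information_schema.session_status queries: the unstable counter is excluded from the listing, captured once into @key_blocks_unused, and later checked only relationally with < and =. A sketch of that capture-and-compare idiom, assuming a single-row status lookup:

select variable_value into @baseline
from information_schema.session_status
where variable_name = 'Key_blocks_unused';
# ... run statements that allocate key cache blocks ...
select variable_value < @baseline as blocks_were_consumed
from information_schema.session_status
where variable_name = 'Key_blocks_unused';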
diff --git a/mysql-test/r/kill-2.result b/mysql-test/r/kill-2.result
new file mode 100644
index 00000000000..c2bcc979441
--- /dev/null
+++ b/mysql-test/r/kill-2.result
@@ -0,0 +1,10 @@
+#
+# MDEV-6896 kill user command cause MariaDB crash!
+#
+create user foo@'127.0.0.1';
+select user from information_schema.processlist;
+user
+foo
+root
+kill user foo@'127.0.0.1';
+drop user foo@'127.0.0.1';
diff --git a/mysql-test/r/kill_processlist-6619.result b/mysql-test/r/kill_processlist-6619.result
index 588c8e6d139..7dd42790cc7 100644
--- a/mysql-test/r/kill_processlist-6619.result
+++ b/mysql-test/r/kill_processlist-6619.result
@@ -3,11 +3,15 @@ SHOW PROCESSLIST;
Id User Host db Command Time State Info Progress
# root # test Sleep # # NULL 0.000
# root # test Query # # SHOW PROCESSLIST 0.000
+SET DEBUG_SYNC='before_execute_sql_command SIGNAL ready WAIT_FOR go';
+SHOW PROCESSLIST;
connection default;
+SET DEBUG_SYNC='now WAIT_FOR ready';
KILL QUERY con_id;
+SET DEBUG_SYNC='now SIGNAL go';
connection con1;
-SHOW PROCESSLIST;
ERROR 70100: Query execution was interrupted
+SET DEBUG_SYNC='reset';
SHOW PROCESSLIST;
Id User Host db Command Time State Info Progress
# root # test Sleep # # NULL 0.000
diff --git a/mysql-test/r/kill_query-6728.result b/mysql-test/r/kill_query-6728.result
new file mode 100644
index 00000000000..6638edebeea
--- /dev/null
+++ b/mysql-test/r/kill_query-6728.result
@@ -0,0 +1,7 @@
+connect con1,localhost,root,,;
+connection default;
+kill query id;
+connection con1;
+select count(*) > 0 from mysql.user;
+count(*) > 0
+1
diff --git a/mysql-test/r/log_tables.result b/mysql-test/r/log_tables.result
index 4471c01c99b..04cd2f01050 100644
--- a/mysql-test/r/log_tables.result
+++ b/mysql-test/r/log_tables.result
@@ -536,7 +536,8 @@ CREATE TABLE `db_17876.slow_log_data` (
`last_insert_id` int(11) default NULL,
`insert_id` int(11) default NULL,
`server_id` int(11) default NULL,
-`sql_text` mediumtext
+`sql_text` mediumtext,
+`thread_id` bigint(21) unsigned default NULL
);
CREATE TABLE `db_17876.general_log_data` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
diff --git a/mysql-test/r/mdev6830.result b/mysql-test/r/mdev6830.result
new file mode 100644
index 00000000000..0570659e860
--- /dev/null
+++ b/mysql-test/r/mdev6830.result
@@ -0,0 +1,49 @@
+drop table if exists t1,t2,t3;
+drop view if exists v2,v3;
+CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=MyISAM;
+CREATE TABLE t2 (
+f1 DATE,
+f2 VARCHAR(1024),
+f3 VARCHAR(10),
+f4 DATE,
+f5 VARCHAR(10),
+f6 VARCHAR(10),
+f7 VARCHAR(10),
+f8 DATETIME,
+f9 INT,
+f10 VARCHAR(1024),
+f11 VARCHAR(1024),
+f12 INT,
+f13 VARCHAR(1024)
+) ENGINE=MyISAM;
+CREATE OR REPLACE VIEW v2 AS SELECT * FROM t2;
+CREATE TABLE t3 (
+f1 VARCHAR(1024),
+f2 VARCHAR(1024),
+f3 DATETIME,
+f4 VARCHAR(10),
+f5 INT,
+f6 VARCHAR(10),
+f7 VARCHAR(1024),
+f8 VARCHAR(10),
+f9 INT,
+f10 DATE,
+f11 INT,
+f12 VARCHAR(1024),
+f13 VARCHAR(10),
+f14 DATE,
+f15 DATETIME
+) ENGINE=MyISAM;
+CREATE OR REPLACE ALGORITHM=TEMPTABLE VIEW v3 AS SELECT * FROM t3;
+INSERT INTO t3 VALUES
+('FOO','foo','2000-08-04 00:00:00','one',1,'1','FOO','foo',1,'2004-05-09',1,'one','one','2001-12-07','2001-10-17 08:25:04'),
+('BAR','bar','2001-01-01 04:52:37','two',2,'2','BAR','bar',2,'2008-01-01',2,'two','two','2006-06-19','2002-01-01 08:22:49');
+CREATE TABLE t4 (f1 VARCHAR(10), f2 INT) ENGINE=MyISAM;
+SELECT * FROM t1;
+pk
+SELECT non_existing FROM v2;
+ERROR 42S22: Unknown column 'non_existing' in 'field list'
+SELECT * FROM t1, v3, t4 WHERE v3.f1 = t4.f1 AND t4.f2 = 6 AND t1.pk = v3.f5;
+pk f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f1 f2
+drop table t1,t2,t3,t4;
+drop view v2,v3;
diff --git a/mysql-test/r/not_embedded.require b/mysql-test/r/not_embedded.require
deleted file mode 100644
index b2ea98bcd0a..00000000000
--- a/mysql-test/r/not_embedded.require
+++ /dev/null
@@ -1,2 +0,0 @@
-have_embedded
-0
diff --git a/mysql-test/r/processlist.result b/mysql-test/r/processlist.result
index 0182245c278..eb3af67c5bf 100644
--- a/mysql-test/r/processlist.result
+++ b/mysql-test/r/processlist.result
@@ -7,9 +7,8 @@ SELECT ID, TIME,TIME_MS FROM INFORMATION_SCHEMA.PROCESSLIST WHERE CONCAT(":", ID
SET DEBUG_SYNC = 'now SIGNAL fill_schema_proceed';
ID TIME TIME_MS
TID 0 0.000
-SET DEBUG_SYNC = 'dispatch_command_end SIGNAL query_done EXECUTE 2';
-SET DEBUG_SYNC= 'now WAIT_FOR query_done';
-SET DEBUG_SYNC= 'now SIGNAL nosignal';
+set debug_sync='reset';
+SET debug_dbug="+d,sleep_inject_query_done_debug_sync";
select sleep(5);
sleep(5)
0
diff --git a/mysql-test/r/select_found.result b/mysql-test/r/select_found.result
index 04eb2c90d31..92758fa134b 100644
--- a/mysql-test/r/select_found.result
+++ b/mysql-test/r/select_found.result
@@ -332,3 +332,19 @@ select found_rows() as count;
count
2
drop table t1, t2;
+create table t1 (i int, v varchar(64), key (i));
+select sql_calc_found_rows * from t1 where i = 0 order by v limit 59,2;
+i v
+0 foo
+0 foo
+select found_rows();
+found_rows()
+75
+select sql_calc_found_rows * from t1 ignore index (i) where i = 0 order by v limit 59,2;
+i v
+0 foo
+0 foo
+select found_rows();
+found_rows()
+75
+drop table t1;
diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result
index 690905153f8..f8f4b478e65 100644
--- a/mysql-test/r/show_check.result
+++ b/mysql-test/r/show_check.result
@@ -1456,12 +1456,6 @@ GRANT PROCESS ON *.* TO test_u@localhost;
SHOW ENGINE MYISAM MUTEX;
SHOW ENGINE MYISAM STATUS;
DROP USER test_u@localhost;
-#
-# Bug #48985: show create table crashes if previous access to the table
-# was killed
-#
-SHOW CREATE TABLE non_existent;
-ERROR 70100: Query execution was interrupted
End of 5.1 tests
#
# Bug#52593 SHOW CREATE TABLE is blocked if table is locked
diff --git a/mysql-test/r/sp-innodb.result b/mysql-test/r/sp-innodb.result
new file mode 100644
index 00000000000..da02957d3c9
--- /dev/null
+++ b/mysql-test/r/sp-innodb.result
@@ -0,0 +1,32 @@
+drop table if exists t1,t2;
+drop procedure if exists p1;
+#
+#MDEV-6985: MariaDB crashes on stored procedure call
+#
+CREATE TABLE `t1` (
+`ID` int(11) NOT NULL,
+PRIMARY KEY (`ID`)
+) ENGINE=InnoDB;
+CREATE TABLE `t2` (
+`ID` int(11) NOT NULL,
+`DATE` datetime DEFAULT NULL,
+PRIMARY KEY (`ID`)
+) ENGINE=InnoDB;
+CREATE PROCEDURE `p1`()
+BEGIN
+DECLARE _mySelect CURSOR FOR
+SELECT DISTINCT t1.ID
+FROM t1
+LEFT JOIN t2 AS t2 ON
+t2.ID = t1.ID
+AND t2.DATE = (
+SELECT MAX(T3.DATE) FROM t2 AS T3 WHERE T3.ID = t2.ID AND T3.DATE<=NOW()
+)
+WHERE t1.ID = 1;
+OPEN _mySelect;
+CLOSE _mySelect;
+END ;;
+CALL p1();
+CALL p1();
+drop procedure p1;
+drop table t1,t2;
diff --git a/mysql-test/r/statistics_index_crash-7362.result b/mysql-test/r/statistics_index_crash-7362.result
new file mode 100644
index 00000000000..99f65d7e1b7
--- /dev/null
+++ b/mysql-test/r/statistics_index_crash-7362.result
@@ -0,0 +1,33 @@
+CREATE TABLE t1 (a longtext, FULLTEXT KEY (`a`)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (unhex('3E0D0A4141414142334E7A6143317963324541414141424977414141674541726D'));
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+SELECT * FROM mysql.index_stats WHERE index_name='a' AND table_name='t1';
+db_name table_name index_name prefix_arity avg_frequency
+test t1 a 1 NULL
+DROP TABLE t1;
+CREATE TABLE t1 (a longtext, FULLTEXT KEY (`a`)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (unhex('3E0D0A4141414142334E7A6143317963324541414141424977414141674541726D'));
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+SELECT * FROM mysql.index_stats WHERE index_name='a' AND table_name='t1';
+db_name table_name index_name prefix_arity avg_frequency
+test t1 a 1 NULL
+DROP TABLE t1;
+CREATE TABLE geom (g GEOMETRY NOT NULL, SPATIAL INDEX(g)) ENGINE=MyISAM;
+INSERT INTO geom VALUES
+(MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+(MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+(MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
+ANALYZE TABLE geom PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.geom analyze status Engine-independent statistics collected
+test.geom analyze status OK
+SELECT * FROM mysql.index_stats WHERE index_name='g' AND table_name='geom';
+db_name table_name index_name prefix_arity avg_frequency
+test geom g 1 NULL
+DROP TABLE geom;
diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result
index e7add0d80a7..38838e72e96 100644
--- a/mysql-test/r/type_timestamp.result
+++ b/mysql-test/r/type_timestamp.result
@@ -645,3 +645,49 @@ MAX(dt) = '2011-01-06 12:34:30'
1
DROP TABLE t1;
End of 5.5 tests
+#
+# MDEV-7254: Assigned expression is evaluated twice when updating column TIMESTAMP NOT NULL
+#
+SET time_zone='+02:00';
+create table t1(value timestamp not null);
+set @a:=0;
+create function f1 () returns timestamp
+begin
+set @a = @a + 1;
+return NULL;
+end//
+set timestamp=12340;
+insert t1 values (f1());
+select @a, value from t1;
+@a value
+1 1970-01-01 05:25:40
+set timestamp=12350;
+update t1 set value = f1();
+select @a, value from t1;
+@a value
+2 1970-01-01 05:25:50
+drop table t1;
+drop function f1;
+set timestamp=0;
+create table t1(value timestamp null);
+set @a:=0;
+create function f1 () returns timestamp
+begin
+set @a = @a + 1;
+return NULL;
+end//
+set timestamp=12340;
+insert t1 values (f1());
+select @a, value from t1;
+@a value
+1 NULL
+set timestamp=12350;
+update t1 set value = f1();
+select @a, value from t1;
+@a value
+2 NULL
+drop table t1;
+drop function f1;
+set timestamp=0;
+SET time_zone=DEFAULT;
+End of 10.0 tests
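(Illustrative aside, not part of the patch.) The MDEV-7254 test above detects double evaluation through a side effect: f1() bumps @a on every call, so after each single-row INSERT or UPDATE the counter must have advanced by exactly one. A minimal sketch of the same idiom, with hypothetical names (count_calls, t, ts) and a single-row table assumed:

set @calls = 0;
delimiter //
create function count_calls() returns int
begin
  set @calls = @calls + 1;
  return 1;
end//
delimiter ;
update t set ts = count_calls();
select @calls;  # expect 1; a value of 2 would mean the expression ran twice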
diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result
index ff155e5fe15..23d1d76ca26 100644
--- a/mysql-test/r/view.result
+++ b/mysql-test/r/view.result
@@ -5394,6 +5394,8 @@ SELECT `f1`(1);
DROP FUNCTION f1;
DROP VIEW v1;
DROP TABLE t1, t2;
+create view v1 as select 1;
+drop view v1;
# -----------------------------------------------------------------
# -- End of 5.5 tests.
# -----------------------------------------------------------------
diff --git a/mysql-test/r/windows.result b/mysql-test/r/windows.result
index d0cdd858d4a..05e4600f4b7 100644
--- a/mysql-test/r/windows.result
+++ b/mysql-test/r/windows.result
@@ -60,3 +60,8 @@ SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
WHERE VARIABLE_NAME = 'socket';
VARIABLE_NAME
SOCKET
+#
+# Bug#16581605: REPLACE.EXE UTILITY IS BROKEN IN 5.5
+#
+xyz
+def
diff --git a/mysql-test/std_data/bad_row_type.MYD b/mysql-test/std_data/bad_row_type.MYD
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/mysql-test/std_data/bad_row_type.MYD
diff --git a/mysql-test/std_data/bad_row_type.MYI b/mysql-test/std_data/bad_row_type.MYI
new file mode 100644
index 00000000000..731a83ed078
--- /dev/null
+++ b/mysql-test/std_data/bad_row_type.MYI
Binary files differ
diff --git a/mysql-test/std_data/bad_row_type.frm b/mysql-test/std_data/bad_row_type.frm
new file mode 100644
index 00000000000..d993c98d73e
--- /dev/null
+++ b/mysql-test/std_data/bad_row_type.frm
Binary files differ
diff --git a/mysql-test/suite.pm b/mysql-test/suite.pm
index ac4068a6373..bef37ac4d04 100644
--- a/mysql-test/suite.pm
+++ b/mysql-test/suite.pm
@@ -53,7 +53,7 @@ sub skip_combinations {
$skip{'include/check_ipv6.inc'} = 'No IPv6' unless ipv6_ok();
$skip{'t/openssl_6975.test'} = 'no or too old openssl'
- unless ! IS_WINDOWS and ! system "openssl ciphers TLSv1.2 2>&1 >/dev/null";
+ unless ! IS_WINDOWS and ! system "openssl ciphers TLSv1.2 >/dev/null 2>&1";
%skip;
}
diff --git a/mysql-test/suite/binlog/r/binlog_checkpoint.result b/mysql-test/suite/binlog/r/binlog_checkpoint.result
index 2ce9ed760f5..f76fc6da189 100644
--- a/mysql-test/suite/binlog/r/binlog_checkpoint.result
+++ b/mysql-test/suite/binlog/r/binlog_checkpoint.result
@@ -112,6 +112,18 @@ master-bin.000003 #
master-bin.000004 #
master-bin.000005 #
master-bin.000006 #
+SET debug_sync = 'reset';
+*** MDEV-7402: 'reset master' hangs, waits for signalled COND_xid_list ***
+SET debug_sync="reset_logs_after_set_reset_master_pending SIGNAL reset_master_ready WAIT_FOR reset_master_cont";
+RESET MASTER;
+SET @old_dbug= @@global.DEBUG_DBUG;
+SET GLOBAL debug_dbug="+d,inject_binlog_background_thread_before_mark_xid_done";
+SET debug_sync="now WAIT_FOR reset_master_ready";
+RESET MASTER;
+SET debug_sync="now WAIT_FOR injected_binlog_background_thread";
+SET GLOBAL debug_dbug=@old_dbug;
+SET debug_sync="now SIGNAL reset_master_cont";
+SET debug_sync = 'reset';
DROP TABLE t1, t2;
SET GLOBAL max_binlog_size= @old_max_binlog_size;
SET GLOBAL innodb_flush_log_at_trx_commit= @old_innodb_flush_log_at_trx_commit;
diff --git a/mysql-test/suite/binlog/r/binlog_switch_inside_trans.result b/mysql-test/suite/binlog/r/binlog_switch_inside_trans.result
index 48b6dfa61d9..0819d4e778f 100644
--- a/mysql-test/suite/binlog/r/binlog_switch_inside_trans.result
+++ b/mysql-test/suite/binlog/r/binlog_switch_inside_trans.result
@@ -1,6 +1,5 @@
set @save_binlog_format= @@global.binlog_format;
set @save_binlog_dirct= @@global.binlog_direct_non_transactional_updates;
-set @save_sql_log_bin= @@global.sql_log_bin;
create table t1 (a int) engine= myisam;
create table t2 (a int) engine= innodb;
SELECT @@session.binlog_format;
@@ -129,7 +128,7 @@ commit;
begin;
insert into t2 values (5);
# Test that the global variable 'binlog_format' and
-# 'binlog_direct_non_transactional_updates' and 'sql_log_bin' are
+# 'binlog_direct_non_transactional_updates' are
# writable inside a transaction.
# Current session values are ROW, FALSE, TRUE respectively.
SELECT @@global.binlog_format;
@@ -137,20 +136,15 @@ SELECT @@global.binlog_format;
ROW
set @@global.binlog_format= statement;
set @@global.binlog_direct_non_transactional_updates= TRUE;
-set @@global.sql_log_bin= FALSE;
SELECT @@global.binlog_format;
@@global.binlog_format
STATEMENT
SELECT @@global.binlog_direct_non_transactional_updates;
@@global.binlog_direct_non_transactional_updates
1
-SELECT @@global.sql_log_bin;
-@@global.sql_log_bin
-0
commit;
set @@global.binlog_format= @save_binlog_format;
set @@global.binlog_direct_non_transactional_updates= @save_binlog_dirct;
-set @@global.sql_log_bin= @save_sql_log_bin;
create table t3(a int, b int) engine= innodb;
create table t4(a int) engine= innodb;
create table t5(a int) engine= innodb;
diff --git a/mysql-test/suite/binlog/t/binlog_checkpoint.test b/mysql-test/suite/binlog/t/binlog_checkpoint.test
index 356f860af32..cdb71887ad6 100644
--- a/mysql-test/suite/binlog/t/binlog_checkpoint.test
+++ b/mysql-test/suite/binlog/t/binlog_checkpoint.test
@@ -138,8 +138,39 @@ SET DEBUG_SYNC= "now WAIT_FOR injected_binlog_background_thread";
SET GLOBAL debug_dbug= @old_dbug;
INSERT INTO t1 VALUES (31, REPEAT("x", 4100));
--source include/show_binary_logs.inc
+SET debug_sync = 'reset';
+
+
+--echo *** MDEV-7402: 'reset master' hangs, waits for signalled COND_xid_list ***
+
+--source include/wait_for_binlog_checkpoint.inc
+
+connect(con3,localhost,root,,);
+# Make the binlog background thread wait before clearing the pending checkpoint.
+# The bug was that one RESET MASTER would clear the reset_master_pending
+# flag set by another RESET MASTER; this could cause the wakeup from the
+# binlog background thread not to be sent, and thus the second RESET MASTER
+# to wait infinitely.
+SET debug_sync="reset_logs_after_set_reset_master_pending SIGNAL reset_master_ready WAIT_FOR reset_master_cont";
+send RESET MASTER;
+
+--connection default
+SET @old_dbug= @@global.DEBUG_DBUG;
+SET GLOBAL debug_dbug="+d,inject_binlog_background_thread_before_mark_xid_done";
+SET debug_sync="now WAIT_FOR reset_master_ready";
+RESET MASTER;
+SET debug_sync="now WAIT_FOR injected_binlog_background_thread";
+SET GLOBAL debug_dbug=@old_dbug;
+SET debug_sync="now SIGNAL reset_master_cont";
+
+--connection con3
+REAP;
+
+--connection default
+SET debug_sync = 'reset';
+# Clean up.
DROP TABLE t1, t2;
SET GLOBAL max_binlog_size= @old_max_binlog_size;
SET GLOBAL innodb_flush_log_at_trx_commit= @old_innodb_flush_log_at_trx_commit;
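(Illustrative aside, not part of the patch.) The MDEV-7402 regression test above is built on the generic DEBUG_SYNC handshake: one connection parks at a named sync point and announces it, a second connection waits for that announcement, performs the racing work, and then releases the parked connection. A minimal sketch of the pattern, assuming a debug build and a hypothetical sync point name:

# connection A: arm the sync point, then run the statement that hits it
SET debug_sync = 'some_sync_point SIGNAL parked WAIT_FOR go';
# ... the statement blocks at some_sync_point after signalling 'parked' ...
# connection B: wait for A to park, do the concurrent work, then release A
SET debug_sync = 'now WAIT_FOR parked';
SET debug_sync = 'now SIGNAL go';
SET debug_sync = 'reset';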
diff --git a/mysql-test/suite/binlog/t/binlog_grant.test b/mysql-test/suite/binlog/t/binlog_grant.test
index f0367b3aa79..0c9d9a45ec9 100644
--- a/mysql-test/suite/binlog/t/binlog_grant.test
+++ b/mysql-test/suite/binlog/t/binlog_grant.test
@@ -15,7 +15,7 @@ show grants for mysqltest_1@localhost;
connect (plain,localhost,mysqltest_1,,test);
connect (root,localhost,root,,test);
-# Testing setting both session and global SQL_LOG_BIN variable both as
+# Testing setting session SQL_LOG_BIN variable both as
# root and as plain user.
--echo **** Variable SQL_LOG_BIN ****
diff --git a/mysql-test/suite/binlog/t/binlog_switch_inside_trans.test b/mysql-test/suite/binlog/t/binlog_switch_inside_trans.test
index 06c5e78bd0e..a93cd44fa17 100644
--- a/mysql-test/suite/binlog/t/binlog_switch_inside_trans.test
+++ b/mysql-test/suite/binlog/t/binlog_switch_inside_trans.test
@@ -10,7 +10,6 @@ source include/have_binlog_format_row.inc;
set @save_binlog_format= @@global.binlog_format;
set @save_binlog_dirct= @@global.binlog_direct_non_transactional_updates;
-set @save_sql_log_bin= @@global.sql_log_bin;
create table t1 (a int) engine= myisam;
create table t2 (a int) engine= innodb;
@@ -117,21 +116,18 @@ commit;
begin;
insert into t2 values (5);
--echo # Test that the global variable 'binlog_format' and
---echo # 'binlog_direct_non_transactional_updates' and 'sql_log_bin' are
+--echo # 'binlog_direct_non_transactional_updates' are
--echo # writable inside a transaction.
--echo # Current session values are ROW, FALSE, TRUE respectively.
SELECT @@global.binlog_format;
set @@global.binlog_format= statement;
set @@global.binlog_direct_non_transactional_updates= TRUE;
- set @@global.sql_log_bin= FALSE;
SELECT @@global.binlog_format;
SELECT @@global.binlog_direct_non_transactional_updates;
- SELECT @@global.sql_log_bin;
commit;
set @@global.binlog_format= @save_binlog_format;
set @@global.binlog_direct_non_transactional_updates= @save_binlog_dirct;
-set @@global.sql_log_bin= @save_sql_log_bin;
create table t3(a int, b int) engine= innodb;
create table t4(a int) engine= innodb;
diff --git a/mysql-test/suite/engines/funcs/combinations b/mysql-test/suite/engines/funcs/combinations
new file mode 100644
index 00000000000..4d236d27b8f
--- /dev/null
+++ b/mysql-test/suite/engines/funcs/combinations
@@ -0,0 +1,11 @@
+[innodb]
+innodb
+default-storage-engine=innodb
+partition
+
+[myisam]
+skip-innodb
+default-storage-engine=myisam
+partition
+
+
diff --git a/mysql-test/suite/engines/funcs/r/db_create_drop.result b/mysql-test/suite/engines/funcs/r/db_create_drop.result
index 89c3b3ce991..85a871cf135 100644
--- a/mysql-test/suite/engines/funcs/r/db_create_drop.result
+++ b/mysql-test/suite/engines/funcs/r/db_create_drop.result
@@ -2,8 +2,8 @@ DROP DATABASE IF EXISTS d1;
CREATE DATABASE d1;
SHOW DATABASES;
Database
-information_schema
d1
+information_schema
mtr
mysql
performance_schema
@@ -13,8 +13,8 @@ Database (d%)
d1
SHOW DATABASES LIKE '%';
Database (%)
-information_schema
d1
+information_schema
mtr
mysql
performance_schema
@@ -24,8 +24,8 @@ DROP DATABASE d1;
CREATE SCHEMA d1;
SHOW SCHEMAS;
Database
-information_schema
d1
+information_schema
mtr
mysql
performance_schema
@@ -35,8 +35,8 @@ Database (d%)
d1
SHOW SCHEMAS LIKE '%';
Database (%)
-information_schema
d1
+information_schema
mtr
mysql
performance_schema
diff --git a/mysql-test/suite/engines/funcs/r/db_create_error.result b/mysql-test/suite/engines/funcs/r/db_create_error.result
index 35d5d4abfeb..d18fe7ee5fd 100644
--- a/mysql-test/suite/engines/funcs/r/db_create_error.result
+++ b/mysql-test/suite/engines/funcs/r/db_create_error.result
@@ -2,8 +2,8 @@ DROP DATABASE IF EXISTS d4;
CREATE DATABASE d4;
SHOW DATABASES;
Database
-information_schema
d4
+information_schema
mtr
mysql
performance_schema
diff --git a/mysql-test/suite/engines/funcs/r/db_create_if_not_exists.result b/mysql-test/suite/engines/funcs/r/db_create_if_not_exists.result
index c9e31817910..2637087da5b 100644
--- a/mysql-test/suite/engines/funcs/r/db_create_if_not_exists.result
+++ b/mysql-test/suite/engines/funcs/r/db_create_if_not_exists.result
@@ -5,8 +5,8 @@ Warnings:
Note 1007 Can't create database 'd2'; database exists
SHOW DATABASES;
Database
-information_schema
d2
+information_schema
mtr
mysql
performance_schema
@@ -22,8 +22,8 @@ Warnings:
Note 1007 Can't create database 'd2'; database exists
SHOW DATABASES;
Database
-information_schema
d2
+information_schema
mtr
mysql
performance_schema
diff --git a/mysql-test/suite/engines/funcs/r/db_drop_error.result b/mysql-test/suite/engines/funcs/r/db_drop_error.result
index 8db8cb598af..24a51768103 100644
--- a/mysql-test/suite/engines/funcs/r/db_drop_error.result
+++ b/mysql-test/suite/engines/funcs/r/db_drop_error.result
@@ -2,8 +2,8 @@ DROP DATABASE IF EXISTS d5;
CREATE DATABASE d5;
SHOW DATABASES;
Database
-information_schema
d5
+information_schema
mtr
mysql
performance_schema
diff --git a/mysql-test/suite/engines/funcs/r/db_use_error.result b/mysql-test/suite/engines/funcs/r/db_use_error.result
index 2051b56b19e..c2fce4f0acf 100644
--- a/mysql-test/suite/engines/funcs/r/db_use_error.result
+++ b/mysql-test/suite/engines/funcs/r/db_use_error.result
@@ -2,8 +2,8 @@ DROP DATABASE IF EXISTS d6;
CREATE DATABASE d6;
SHOW DATABASES;
Database
-information_schema
d6
+information_schema
mtr
mysql
performance_schema
diff --git a/mysql-test/suite/engines/funcs/r/sf_alter.result b/mysql-test/suite/engines/funcs/r/sf_alter.result
index 8885b20d557..e89f529ba09 100644
--- a/mysql-test/suite/engines/funcs/r/sf_alter.result
+++ b/mysql-test/suite/engines/funcs/r/sf_alter.result
@@ -35,8 +35,6 @@ ALTER FUNCTION sf1 #DET# ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -54,8 +52,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -76,9 +72,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -105,8 +98,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -172,8 +163,6 @@ ALTER FUNCTION sf1 #DET# CONTAINS SQL ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -191,8 +180,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -213,9 +200,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -242,8 +226,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -309,8 +291,6 @@ ALTER FUNCTION sf1 #DET# NO SQL ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -328,8 +308,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -350,9 +328,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -379,8 +354,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -446,8 +419,6 @@ ALTER FUNCTION sf1 #DET# READS SQL DATA ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -465,8 +436,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -487,9 +456,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -516,8 +482,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -583,8 +547,6 @@ ALTER FUNCTION sf1 #DET# MODIFIES SQL DATA ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -602,8 +564,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -624,9 +584,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -653,8 +610,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -720,8 +675,6 @@ ALTER FUNCTION sf1 #DET# COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -739,8 +692,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -761,9 +712,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -790,8 +738,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -857,8 +803,6 @@ ALTER FUNCTION sf1 #DET# CONTAINS SQL COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -876,8 +820,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -898,9 +840,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -927,8 +866,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -994,8 +931,6 @@ ALTER FUNCTION sf1 #DET# NO SQL COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1013,8 +948,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1035,9 +968,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1064,8 +994,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1131,8 +1059,6 @@ ALTER FUNCTION sf1 #DET# READS SQL DATA COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1150,8 +1076,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1172,9 +1096,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1201,8 +1122,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1268,8 +1187,6 @@ ALTER FUNCTION sf1 #DET# MODIFIES SQL DATA COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1287,8 +1204,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1309,9 +1224,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1338,8 +1250,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1405,8 +1315,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1424,8 +1332,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1446,9 +1352,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1475,8 +1378,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1542,8 +1443,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# CONTAINS SQL ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1561,8 +1460,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1583,9 +1480,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1612,8 +1506,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1679,8 +1571,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# NO SQL ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1698,8 +1588,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1720,9 +1608,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1749,8 +1634,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1816,8 +1699,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# READS SQL DATA ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1835,8 +1716,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1857,9 +1736,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1886,8 +1762,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1953,8 +1827,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# MODIFIES SQL DATA ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1972,8 +1844,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1994,9 +1864,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2023,8 +1890,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2090,8 +1955,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2109,8 +1972,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2131,9 +1992,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2160,8 +2018,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2227,8 +2083,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# CONTAINS SQL COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2246,8 +2100,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2268,9 +2120,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2297,8 +2146,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2364,8 +2211,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# NO SQL COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2383,8 +2228,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2405,9 +2248,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2434,8 +2274,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2501,8 +2339,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# READS SQL DATA COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2520,8 +2356,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2542,9 +2376,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2571,8 +2402,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2638,8 +2467,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# MODIFIES SQL DATA COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2657,8 +2484,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2679,9 +2504,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2708,8 +2530,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2775,8 +2595,6 @@ ALTER FUNCTION sf1 #DET# SQL SECURITY INVOKER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2794,8 +2612,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2816,9 +2632,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2845,8 +2658,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2912,8 +2723,6 @@ ALTER FUNCTION sf1 #DET# CONTAINS SQL SQL SECURITY INVOKER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2931,8 +2740,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2953,9 +2760,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2982,8 +2786,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3049,8 +2851,6 @@ ALTER FUNCTION sf1 #DET# NO SQL SQL SECURITY INVOKER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3068,8 +2868,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3090,9 +2888,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3119,8 +2914,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3186,8 +2979,6 @@ ALTER FUNCTION sf1 #DET# READS SQL DATA SQL SECURITY INVOKER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3205,8 +2996,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3227,9 +3016,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3256,8 +3042,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3323,8 +3107,6 @@ ALTER FUNCTION sf1 #DET# MODIFIES SQL DATA SQL SECURITY INVOKER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3342,8 +3124,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3364,9 +3144,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3393,8 +3170,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3460,8 +3235,6 @@ ALTER FUNCTION sf1 #DET# SQL SECURITY INVOKER COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3479,8 +3252,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3501,9 +3272,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3530,8 +3298,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3597,8 +3363,6 @@ ALTER FUNCTION sf1 #DET# CONTAINS SQL SQL SECURITY INVOKER COMMENT 'comment'
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3616,8 +3380,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3638,9 +3400,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3667,8 +3426,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3734,8 +3491,6 @@ ALTER FUNCTION sf1 #DET# NO SQL SQL SECURITY INVOKER COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3753,8 +3508,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3775,9 +3528,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3804,8 +3554,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3871,8 +3619,6 @@ ALTER FUNCTION sf1 #DET# READS SQL DATA SQL SECURITY INVOKER COMMENT 'comment
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3890,8 +3636,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3912,9 +3656,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3941,8 +3682,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4008,8 +3747,6 @@ ALTER FUNCTION sf1 #DET# MODIFIES SQL DATA SQL SECURITY INVOKER COMMENT 'comm
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4027,8 +3764,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4049,9 +3784,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4078,8 +3810,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4145,8 +3875,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# SQL SECURITY INVOKER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4164,8 +3892,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4186,9 +3912,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4215,8 +3938,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4282,8 +4003,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# CONTAINS SQL SQL SECURITY INVOKER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4301,8 +4020,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4323,9 +4040,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4352,8 +4066,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4419,8 +4131,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# NO SQL SQL SECURITY INVOKER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4438,8 +4148,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4460,9 +4168,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4489,8 +4194,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4556,8 +4259,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# READS SQL DATA SQL SECURITY INVOKER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4575,8 +4276,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4597,9 +4296,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4626,8 +4322,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4693,8 +4387,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# MODIFIES SQL DATA SQL SECURITY INVOKER
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4712,8 +4404,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4734,9 +4424,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4763,8 +4450,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4830,8 +4515,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# SQL SECURITY INVOKER COMMENT 'comment'
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4849,8 +4532,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4871,9 +4552,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4900,8 +4578,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4967,8 +4643,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# CONTAINS SQL SQL SECURITY INVOKER COMMENT
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -4986,8 +4660,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5008,9 +4680,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5037,8 +4706,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5104,8 +4771,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# NO SQL SQL SECURITY INVOKER COMMENT 'comm
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5123,8 +4788,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5145,9 +4808,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5174,8 +4834,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5241,8 +4899,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# READS SQL DATA SQL SECURITY INVOKER COMME
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5260,8 +4916,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5282,9 +4936,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5311,8 +4962,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5378,8 +5027,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# MODIFIES SQL DATA SQL SECURITY INVOKER CO
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5397,8 +5044,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5419,9 +5064,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5448,8 +5090,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5515,8 +5155,6 @@ ALTER FUNCTION sf1 #DET# SQL SECURITY DEFINER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5534,8 +5172,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5556,9 +5192,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5585,8 +5218,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5652,8 +5283,6 @@ ALTER FUNCTION sf1 #DET# CONTAINS SQL SQL SECURITY DEFINER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5671,8 +5300,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5693,9 +5320,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5722,8 +5346,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5789,8 +5411,6 @@ ALTER FUNCTION sf1 #DET# NO SQL SQL SECURITY DEFINER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5808,8 +5428,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5830,9 +5448,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5859,8 +5474,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5926,8 +5539,6 @@ ALTER FUNCTION sf1 #DET# READS SQL DATA SQL SECURITY DEFINER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5945,8 +5556,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5967,9 +5576,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -5996,8 +5602,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6063,8 +5667,6 @@ ALTER FUNCTION sf1 #DET# MODIFIES SQL DATA SQL SECURITY DEFINER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6082,8 +5684,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6104,9 +5704,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6133,8 +5730,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6200,8 +5795,6 @@ ALTER FUNCTION sf1 #DET# SQL SECURITY DEFINER COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6219,8 +5812,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6241,9 +5832,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6270,8 +5858,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6337,8 +5923,6 @@ ALTER FUNCTION sf1 #DET# CONTAINS SQL SQL SECURITY DEFINER COMMENT 'comment'
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6356,8 +5940,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6378,9 +5960,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6407,8 +5986,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6474,8 +6051,6 @@ ALTER FUNCTION sf1 #DET# NO SQL SQL SECURITY DEFINER COMMENT 'comment' ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6493,8 +6068,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6515,9 +6088,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6544,8 +6114,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6611,8 +6179,6 @@ ALTER FUNCTION sf1 #DET# READS SQL DATA SQL SECURITY DEFINER COMMENT 'commen
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6630,8 +6196,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6652,9 +6216,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6681,8 +6242,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6748,8 +6307,6 @@ ALTER FUNCTION sf1 #DET# MODIFIES SQL DATA SQL SECURITY DEFINER COMMENT 'com
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6767,8 +6324,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6789,9 +6344,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6818,8 +6370,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6885,8 +6435,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# SQL SECURITY DEFINER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6904,8 +6452,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6926,9 +6472,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -6955,8 +6498,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7022,8 +6563,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# CONTAINS SQL SQL SECURITY DEFINER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7041,8 +6580,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7063,9 +6600,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7092,8 +6626,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7159,8 +6691,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# NO SQL SQL SECURITY DEFINER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7178,8 +6708,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7200,9 +6728,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7229,8 +6754,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7296,8 +6819,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# READS SQL DATA SQL SECURITY DEFINER ;
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7315,8 +6836,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7337,9 +6856,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7366,8 +6882,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7433,8 +6947,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# MODIFIES SQL DATA SQL SECURITY DEFINER
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7452,8 +6964,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7474,9 +6984,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7503,8 +7010,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7570,8 +7075,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# SQL SECURITY DEFINER COMMENT 'comment'
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7589,8 +7092,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7611,9 +7112,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7640,8 +7138,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7707,8 +7203,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# CONTAINS SQL SQL SECURITY DEFINER COMMEN
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7726,8 +7220,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7748,9 +7240,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7777,8 +7266,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7844,8 +7331,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# NO SQL SQL SECURITY DEFINER COMMENT 'com
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7863,8 +7348,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7885,9 +7368,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7914,8 +7394,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -7981,8 +7459,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# READS SQL DATA SQL SECURITY DEFINER COMM
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -8000,8 +7476,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -8022,9 +7496,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -8051,8 +7522,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -8118,8 +7587,6 @@ ALTER FUNCTION sf1 LANGUAGE SQL #DET# MODIFIES SQL DATA SQL SECURITY DEFINER C
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -8137,8 +7604,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -8159,9 +7624,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -8188,8 +7650,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
diff --git a/mysql-test/suite/engines/funcs/r/sf_cursor.result b/mysql-test/suite/engines/funcs/r/sf_cursor.result
index e43bcaa50a3..2fe2171b39d 100644
--- a/mysql-test/suite/engines/funcs/r/sf_cursor.result
+++ b/mysql-test/suite/engines/funcs/r/sf_cursor.result
@@ -34,8 +34,6 @@ END//
SELECT sf1();
sf1()
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -53,8 +51,6 @@ id data
2 1
3 3
INSERT INTO t4 VALUES(sf1());
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -75,9 +71,6 @@ id data
3 3
3 3
UPDATE t4 SET i = sf1() + 1 WHERE i = sf1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -104,8 +97,6 @@ id data
3 3
3 3
DELETE FROM t4 WHERE i = sf1() + 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
diff --git a/mysql-test/suite/engines/funcs/r/sp_alter.result b/mysql-test/suite/engines/funcs/r/sp_alter.result
index 794a692edc4..22036fecde7 100644
--- a/mysql-test/suite/engines/funcs/r/sp_alter.result
+++ b/mysql-test/suite/engines/funcs/r/sp_alter.result
@@ -31,8 +31,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -82,8 +80,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# CONTAINS SQL ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -133,8 +129,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# NO SQL ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -184,8 +178,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# READS SQL DATA ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -235,8 +227,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# MODIFIES SQL DATA ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -286,8 +276,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -337,8 +325,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# CONTAINS SQL COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -388,8 +374,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# NO SQL COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -439,8 +423,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# READS SQL DATA COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -490,8 +472,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# MODIFIES SQL DATA COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -541,8 +521,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -592,8 +570,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# CONTAINS SQL ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -643,8 +619,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# NO SQL ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -694,8 +668,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# READS SQL DATA ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -745,8 +717,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# MODIFIES SQL DATA ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -796,8 +766,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -847,8 +815,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# CONTAINS SQL COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -898,8 +864,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# NO SQL COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -949,8 +913,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# READS SQL DATA COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1000,8 +962,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# MODIFIES SQL DATA COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1051,8 +1011,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1102,8 +1060,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# CONTAINS SQL SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1153,8 +1109,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# NO SQL SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1204,8 +1158,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# READS SQL DATA SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1255,8 +1207,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# MODIFIES SQL DATA SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1306,8 +1256,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1357,8 +1305,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# CONTAINS SQL SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1408,8 +1354,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# NO SQL SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1459,8 +1403,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# READS SQL DATA SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1510,8 +1452,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# MODIFIES SQL DATA SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1561,8 +1501,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1612,8 +1550,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# CONTAINS SQL SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1663,8 +1599,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# NO SQL SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1714,8 +1648,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# READS SQL DATA SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1765,8 +1697,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# MODIFIES SQL DATA SQL SECURITY INVOKER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1816,8 +1746,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1867,8 +1795,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# CONTAINS SQL SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1918,8 +1844,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# NO SQL SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -1969,8 +1893,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# READS SQL DATA SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2020,8 +1942,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# MODIFIES SQL DATA SQL SECURITY INVOKER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2071,8 +1991,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2122,8 +2040,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# CONTAINS SQL SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2173,8 +2089,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# NO SQL SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2224,8 +2138,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# READS SQL DATA SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2275,8 +2187,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# MODIFIES SQL DATA SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2326,8 +2236,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2377,8 +2285,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# CONTAINS SQL SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2428,8 +2334,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# NO SQL SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2479,8 +2383,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# READS SQL DATA SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2530,8 +2432,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 #DET# MODIFIES SQL DATA SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2581,8 +2481,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2632,8 +2530,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# CONTAINS SQL SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2683,8 +2579,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# NO SQL SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2734,8 +2628,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# READS SQL DATA SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2785,8 +2677,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# MODIFIES SQL DATA SQL SECURITY DEFINER ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2836,8 +2726,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2887,8 +2775,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# CONTAINS SQL SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2938,8 +2824,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# NO SQL SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -2989,8 +2873,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# READS SQL DATA SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
@@ -3040,8 +2922,6 @@ CLOSE cur2;
END//
ALTER PROCEDURE sp1 LANGUAGE SQL #DET# MODIFIES SQL DATA SQL SECURITY DEFINER COMMENT 'comment' ;
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
diff --git a/mysql-test/suite/engines/funcs/r/sp_cursor.result b/mysql-test/suite/engines/funcs/r/sp_cursor.result
index 58383f8a9cb..6ce2aae030c 100644
--- a/mysql-test/suite/engines/funcs/r/sp_cursor.result
+++ b/mysql-test/suite/engines/funcs/r/sp_cursor.result
@@ -30,8 +30,6 @@ CLOSE cur1;
CLOSE cur2;
END//
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM t1 ORDER BY id;
id data
1 1
diff --git a/mysql-test/suite/engines/funcs/r/sq_error.result b/mysql-test/suite/engines/funcs/r/sq_error.result
index c983ff73d12..2090f4b9cd4 100644
--- a/mysql-test/suite/engines/funcs/r/sq_error.result
+++ b/mysql-test/suite/engines/funcs/r/sq_error.result
@@ -16,7 +16,7 @@ SELECT * FROM t1 WHERE c1 = (SELECT c1 FROM t2);
ERROR 21000: Subquery returns more than 1 row
UPDATE t1 SET c2 = (SELECT MAX(c1) FROM t2);
UPDATE t1 SET c1 = (SELECT MAX(c1) FROM t1);
-ERROR HY000: You can't specify target table 't1' for update in FROM clause
+ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data
DROP TABLE t1;
DROP TABLE t2;
CREATE TABLE t1 (c1 INT, c2 VARCHAR(100),c3 FLOAT);
@@ -35,7 +35,7 @@ SELECT * FROM t1 WHERE c1 = (SELECT c1 FROM t2);
ERROR 21000: Subquery returns more than 1 row
UPDATE t1 SET c2 = (SELECT MAX(c1) FROM t2);
UPDATE t1 SET c1 = (SELECT MAX(c1) FROM t1);
-ERROR HY000: You can't specify target table 't1' for update in FROM clause
+ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data
DROP TABLE t1;
DROP TABLE t2;
CREATE TABLE t1 (c1 INT, c2 BINARY(100),c3 FLOAT);
@@ -54,7 +54,7 @@ SELECT * FROM t1 WHERE c1 = (SELECT c1 FROM t2);
ERROR 21000: Subquery returns more than 1 row
UPDATE t1 SET c2 = (SELECT MAX(c1) FROM t2);
UPDATE t1 SET c1 = (SELECT MAX(c1) FROM t1);
-ERROR HY000: You can't specify target table 't1' for update in FROM clause
+ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data
DROP TABLE t1;
DROP TABLE t2;
CREATE TABLE t1 (c1 INT, c2 VARBINARY(100),c3 FLOAT);
@@ -73,6 +73,6 @@ SELECT * FROM t1 WHERE c1 = (SELECT c1 FROM t2);
ERROR 21000: Subquery returns more than 1 row
UPDATE t1 SET c2 = (SELECT MAX(c1) FROM t2);
UPDATE t1 SET c1 = (SELECT MAX(c1) FROM t1);
-ERROR HY000: You can't specify target table 't1' for update in FROM clause
+ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data
DROP TABLE t1;
DROP TABLE t2;
diff --git a/mysql-test/suite/engines/funcs/r/ta_rename.result b/mysql-test/suite/engines/funcs/r/ta_rename.result
index 892a49c9708..f9e78428f9e 100644
--- a/mysql-test/suite/engines/funcs/r/ta_rename.result
+++ b/mysql-test/suite/engines/funcs/r/ta_rename.result
@@ -14,7 +14,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -39,7 +39,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -64,7 +64,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -89,7 +89,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -114,7 +114,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -139,7 +139,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -164,7 +164,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -189,7 +189,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -214,7 +214,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -239,7 +239,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -264,7 +264,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -289,7 +289,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -314,7 +314,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -339,7 +339,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -364,7 +364,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -389,7 +389,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -414,7 +414,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -439,7 +439,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -464,7 +464,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -489,7 +489,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -514,7 +514,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -539,7 +539,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -564,7 +564,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -589,7 +589,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -614,7 +614,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -639,7 +639,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -664,7 +664,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -689,7 +689,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -714,7 +714,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -739,7 +739,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -764,7 +764,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -789,7 +789,7 @@ SHOW TABLES;
Tables_in_test
t2
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
diff --git a/mysql-test/suite/engines/funcs/r/tc_rename_error.result b/mysql-test/suite/engines/funcs/r/tc_rename_error.result
index bd1c2abc057..cd5108107fb 100644
--- a/mysql-test/suite/engines/funcs/r/tc_rename_error.result
+++ b/mysql-test/suite/engines/funcs/r/tc_rename_error.result
@@ -15,7 +15,7 @@ ERROR 42S01: Table 't1' already exists
RENAME TABLE t3 TO t1;
ERROR 42S01: Table 't1' already exists
RENAME TABLE t3 TO doesnotexist.t1;
-ERROR HY000: Can't find file: './test/t3.frm' (errno: 2 "No such file or directory")
+ERROR 42S02: Table 'test.t3' doesn't exist
SHOW TABLES;
Tables_in_test
t1
diff --git a/mysql-test/suite/engines/funcs/t/db_create_drop.test b/mysql-test/suite/engines/funcs/t/db_create_drop.test
index 119db90293c..a807c146206 100644
--- a/mysql-test/suite/engines/funcs/t/db_create_drop.test
+++ b/mysql-test/suite/engines/funcs/t/db_create_drop.test
@@ -2,14 +2,18 @@
DROP DATABASE IF EXISTS d1;
--enable_warnings
CREATE DATABASE d1;
+--sorted_result
SHOW DATABASES;
SHOW DATABASES LIKE 'd%';
+--sorted_result
SHOW DATABASES LIKE '%';
USE d1;
DROP DATABASE d1;
CREATE SCHEMA d1;
+--sorted_result
SHOW SCHEMAS;
SHOW SCHEMAS LIKE 'd%';
+--sorted_result
SHOW SCHEMAS LIKE '%';
USE d1;
DROP SCHEMA d1;
diff --git a/mysql-test/suite/engines/funcs/t/db_create_error.test b/mysql-test/suite/engines/funcs/t/db_create_error.test
index d88ef846b4b..ca83e5f0806 100644
--- a/mysql-test/suite/engines/funcs/t/db_create_error.test
+++ b/mysql-test/suite/engines/funcs/t/db_create_error.test
@@ -2,9 +2,11 @@
DROP DATABASE IF EXISTS d4;
--enable_warnings
CREATE DATABASE d4;
+--sorted_result
SHOW DATABASES;
--error 1007
CREATE DATABASE d4;
DROP DATABASE d4;
+--sorted_result
SHOW DATABASES;
diff --git a/mysql-test/suite/engines/funcs/t/db_create_if_not_exists.test b/mysql-test/suite/engines/funcs/t/db_create_if_not_exists.test
index 4e631d5657a..7b130504308 100644
--- a/mysql-test/suite/engines/funcs/t/db_create_if_not_exists.test
+++ b/mysql-test/suite/engines/funcs/t/db_create_if_not_exists.test
@@ -3,12 +3,14 @@ DROP DATABASE IF EXISTS d2;
--enable_warnings
CREATE DATABASE d2;
CREATE DATABASE IF NOT EXISTS d2;
+--sorted_result
SHOW DATABASES;
USE d2;
DROP DATABASE d2;
DROP DATABASE IF EXISTS d2;
CREATE SCHEMA d2;
CREATE SCHEMA IF NOT EXISTS d2;
+--sorted_result
SHOW DATABASES;
USE d2;
DROP SCHEMA d2;
diff --git a/mysql-test/suite/engines/funcs/t/db_drop_error.test b/mysql-test/suite/engines/funcs/t/db_drop_error.test
index f21b8b4e817..7fd6738cf1f 100644
--- a/mysql-test/suite/engines/funcs/t/db_drop_error.test
+++ b/mysql-test/suite/engines/funcs/t/db_drop_error.test
@@ -6,5 +6,6 @@ SHOW DATABASES;
--error 1008
DROP DATABASE nond5;
DROP DATABASE d5;
+--sorted_result
SHOW DATABASES;
diff --git a/mysql-test/suite/engines/funcs/t/db_use_error.test b/mysql-test/suite/engines/funcs/t/db_use_error.test
index 00a411ed5cb..9220660f84e 100644
--- a/mysql-test/suite/engines/funcs/t/db_use_error.test
+++ b/mysql-test/suite/engines/funcs/t/db_use_error.test
@@ -6,5 +6,6 @@ SHOW DATABASES;
--error 1064
USE DATABASE nond6;
DROP DATABASE d6;
+--sorted_result
SHOW DATABASES;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_until.test b/mysql-test/suite/engines/funcs/t/rpl_row_until.test
index bf38bd487ea..7c1715b760e 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_until.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_until.test
@@ -11,18 +11,18 @@ INSERT INTO t1 VALUES (1),(2),(3),(4);
DROP TABLE t1;
# Save master log position for query DROP TABLE t1
save_master_pos;
-let $master_pos_drop_t1= query_get_value(SHOW BINLOG EVENTS, Pos, 7);
-let $master_log_file= query_get_value(SHOW BINLOG EVENTS, Log_name, 7);
+let $master_pos_drop_t1= query_get_value(SHOW BINLOG EVENTS, Pos, 10);
+let $master_log_file= query_get_value(SHOW BINLOG EVENTS, Log_name, 10);
CREATE TABLE t2(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
# Save master log position for query CREATE TABLE t2
save_master_pos;
-let $master_pos_create_t2= query_get_value(SHOW BINLOG EVENTS, Pos, 8);
+let $master_pos_create_t2= query_get_value(SHOW BINLOG EVENTS, Pos, 12);
INSERT INTO t2 VALUES (1),(2);
save_master_pos;
# Save master log position for query INSERT INTO t2 VALUES (1),(2);
-let $master_pos_insert1_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 12);
+let $master_pos_insert1_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 17);
sync_slave_with_master;
# Save relay log position for query INSERT INTO t2 VALUES (1),(2);
@@ -31,8 +31,8 @@ let $relay_pos_insert1_t2= query_get_value(show slave status, Relay_Log_Pos, 1);
connection master;
INSERT INTO t2 VALUES (3),(4);
DROP TABLE t2;
-# Save master log position for query INSERT INTO t2 VALUES (1),(2);
-let $master_pos_drop_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 17);
+# Save master log position for query DROP TABLE t2;
+let $master_pos_drop_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 23);
sync_slave_with_master;
--source include/stop_slave.inc
diff --git a/mysql-test/suite/engines/funcs/t/tc_rename_error.test b/mysql-test/suite/engines/funcs/t/tc_rename_error.test
index 91efeacaeef..3fc4901a364 100644
--- a/mysql-test/suite/engines/funcs/t/tc_rename_error.test
+++ b/mysql-test/suite/engines/funcs/t/tc_rename_error.test
@@ -11,7 +11,7 @@ let $ENGINE=`select variable_value from information_schema.global_variables wher
RENAME TABLE t2 TO t1;
--error 1050
RENAME TABLE t3 TO t1;
---error 1017
+--error 1146
RENAME TABLE t3 TO doesnotexist.t1;
SHOW TABLES;
let $ENGINE=`select variable_value from information_schema.global_variables where variable_name='STORAGE_ENGINE'`;
diff --git a/mysql-test/suite/engines/iuds/combinations b/mysql-test/suite/engines/iuds/combinations
new file mode 100644
index 00000000000..2077c41a7be
--- /dev/null
+++ b/mysql-test/suite/engines/iuds/combinations
@@ -0,0 +1,8 @@
+[innodb]
+innodb
+default-storage-engine=innodb
+
+[myisam]
+skip-innodb
+default-storage-engine=myisam
+
diff --git a/mysql-test/suite/engines/iuds/r/strings_charsets_update_delete.result b/mysql-test/suite/engines/iuds/r/strings_charsets_update_delete.result
index 08eecb1c17d..fdd1a4be290 100644
--- a/mysql-test/suite/engines/iuds/r/strings_charsets_update_delete.result
+++ b/mysql-test/suite/engines/iuds/r/strings_charsets_update_delete.result
Binary files differ
diff --git a/mysql-test/suite/engines/iuds/r/strings_update_delete.result b/mysql-test/suite/engines/iuds/r/strings_update_delete.result
index 283f39efe84..159d2608ea1 100644
--- a/mysql-test/suite/engines/iuds/r/strings_update_delete.result
+++ b/mysql-test/suite/engines/iuds/r/strings_update_delete.result
@@ -70471,17 +70471,17 @@ SIZE
DROP TABLE t17,t18;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14,t15,t16,t17,t18;
Warnings:
-Note 1051 Unknown table 't3'
-Note 1051 Unknown table 't4'
-Note 1051 Unknown table 't5'
-Note 1051 Unknown table 't6'
-Note 1051 Unknown table 't7'
-Note 1051 Unknown table 't8'
-Note 1051 Unknown table 't9'
-Note 1051 Unknown table 't10'
-Note 1051 Unknown table 't11'
-Note 1051 Unknown table 't13'
-Note 1051 Unknown table 't14'
-Note 1051 Unknown table 't15'
-Note 1051 Unknown table 't17'
-Note 1051 Unknown table 't18'
+Note 1051 Unknown table 'test.t3'
+Note 1051 Unknown table 'test.t4'
+Note 1051 Unknown table 'test.t5'
+Note 1051 Unknown table 'test.t6'
+Note 1051 Unknown table 'test.t7'
+Note 1051 Unknown table 'test.t8'
+Note 1051 Unknown table 'test.t9'
+Note 1051 Unknown table 'test.t10'
+Note 1051 Unknown table 'test.t11'
+Note 1051 Unknown table 'test.t13'
+Note 1051 Unknown table 'test.t14'
+Note 1051 Unknown table 'test.t15'
+Note 1051 Unknown table 'test.t17'
+Note 1051 Unknown table 'test.t18'
diff --git a/mysql-test/suite/engines/iuds/r/type_bit_iuds.result b/mysql-test/suite/engines/iuds/r/type_bit_iuds.result
index 7cfeb1958cf..b7a910f36e6 100644
--- a/mysql-test/suite/engines/iuds/r/type_bit_iuds.result
+++ b/mysql-test/suite/engines/iuds/r/type_bit_iuds.result
@@ -780,7 +780,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'0';
0 + b'0'
0
@@ -1611,7 +1611,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'0';
0 + b'0'
0
@@ -2461,7 +2461,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'0';
0 + b'0'
0
@@ -3375,7 +3375,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'0';
0 + b'0'
0
@@ -4574,7 +4574,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'0';
0 + b'0'
0
@@ -5978,7 +5978,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'0';
0 + b'0'
0
@@ -8049,7 +8049,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'0';
0 + b'0'
0
@@ -11227,7 +11227,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1';
0 + b'1'
1
@@ -12009,7 +12009,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1';
0 + b'1'
1
@@ -12840,7 +12840,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1';
0 + b'1'
1
@@ -13690,7 +13690,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1';
0 + b'1'
1
@@ -14604,7 +14604,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1';
0 + b'1'
1
@@ -15803,7 +15803,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1';
0 + b'1'
1
@@ -17207,7 +17207,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1';
0 + b'1'
1
@@ -19278,7 +19278,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1';
0 + b'1'
1
@@ -22456,7 +22456,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'10';
0 + b'10'
2
@@ -23244,7 +23244,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'10';
0 + b'10'
2
@@ -24081,7 +24081,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'10';
0 + b'10'
2
@@ -24931,7 +24931,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'10';
0 + b'10'
2
@@ -25845,7 +25845,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'10';
0 + b'10'
2
@@ -27044,7 +27044,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'10';
0 + b'10'
2
@@ -28448,7 +28448,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'10';
0 + b'10'
2
@@ -30519,7 +30519,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'10';
0 + b'10'
2
@@ -33697,7 +33697,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010';
0 + b'1010'
10
@@ -34485,7 +34485,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010';
0 + b'1010'
10
@@ -35322,7 +35322,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010';
0 + b'1010'
10
@@ -36178,7 +36178,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010';
0 + b'1010'
10
@@ -37092,7 +37092,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010';
0 + b'1010'
10
@@ -38291,7 +38291,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010';
0 + b'1010'
10
@@ -39695,7 +39695,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010';
0 + b'1010'
10
@@ -41766,7 +41766,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010';
0 + b'1010'
10
@@ -44944,7 +44944,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010101010101010';
0 + b'1010101010101010'
43690
@@ -45732,7 +45732,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010101010101010';
0 + b'1010101010101010'
43690
@@ -46569,7 +46569,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010101010101010';
0 + b'1010101010101010'
43690
@@ -47425,7 +47425,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010101010101010';
0 + b'1010101010101010'
43690
@@ -48345,7 +48345,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010101010101010';
0 + b'1010101010101010'
43690
@@ -49550,7 +49550,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010101010101010';
0 + b'1010101010101010'
43690
@@ -50954,7 +50954,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010101010101010';
0 + b'1010101010101010'
43690
@@ -53025,7 +53025,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'1010101010101010';
0 + b'1010101010101010'
43690
@@ -56203,7 +56203,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'101010101010101010101010101010';
0 + b'101010101010101010101010101010'
715827882
@@ -56993,7 +56993,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'101010101010101010101010101010';
0 + b'101010101010101010101010101010'
715827882
@@ -57832,7 +57832,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'101010101010101010101010101010';
0 + b'101010101010101010101010101010'
715827882
@@ -58690,7 +58690,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'101010101010101010101010101010';
0 + b'101010101010101010101010101010'
715827882
@@ -59612,7 +59612,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'101010101010101010101010101010';
0 + b'101010101010101010101010101010'
715827882
@@ -60819,7 +60819,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'101010101010101010101010101010';
0 + b'101010101010101010101010101010'
715827882
@@ -62231,7 +62231,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'101010101010101010101010101010';
0 + b'101010101010101010101010101010'
715827882
@@ -64304,7 +64304,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + b'101010101010101010101010101010';
0 + b'101010101010101010101010101010'
715827882
@@ -67484,7 +67484,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (1010101010101010101010101010101010101010101010101010101010101010<<0);
0 + (1010101010101010101010101010101010101010101010101010101010101010<<0)
9223372036854775807
@@ -68271,7 +68271,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (1010101010101010101010101010101010101010101010101010101010101010<<1);
0 + (1010101010101010101010101010101010101010101010101010101010101010<<1)
18446744073709551614
@@ -69107,7 +69107,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (1010101010101010101010101010101010101010101010101010101010101010<<2);
0 + (1010101010101010101010101010101010101010101010101010101010101010<<2)
18446744073709551612
@@ -69962,7 +69962,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (1010101010101010101010101010101010101010101010101010101010101010<<4);
0 + (1010101010101010101010101010101010101010101010101010101010101010<<4)
18446744073709551600
@@ -70881,7 +70881,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (1010101010101010101010101010101010101010101010101010101010101010<<8);
0 + (1010101010101010101010101010101010101010101010101010101010101010<<8)
18446744073709551360
@@ -72085,7 +72085,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (1010101010101010101010101010101010101010101010101010101010101010<<16);
0 + (1010101010101010101010101010101010101010101010101010101010101010<<16)
18446744073709486080
@@ -73494,7 +73494,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (1010101010101010101010101010101010101010101010101010101010101010<<32);
0 + (1010101010101010101010101010101010101010101010101010101010101010<<32)
18446744069414584320
@@ -75570,7 +75570,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (1010101010101010101010101010101010101010101010101010101010101010<<64);
0 + (1010101010101010101010101010101010101010101010101010101010101010<<64)
0
@@ -78747,7 +78747,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (10101010101010101010101010101010101010101010101010101010101010101<<0);
0 + (10101010101010101010101010101010101010101010101010101010101010101<<0)
9223372036854775807
@@ -79534,7 +79534,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (10101010101010101010101010101010101010101010101010101010101010101<<1);
0 + (10101010101010101010101010101010101010101010101010101010101010101<<1)
18446744073709551614
@@ -80370,7 +80370,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (10101010101010101010101010101010101010101010101010101010101010101<<2);
0 + (10101010101010101010101010101010101010101010101010101010101010101<<2)
18446744073709551612
@@ -81225,7 +81225,7 @@ hex(c1) hex(c2)
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (10101010101010101010101010101010101010101010101010101010101010101<<4);
0 + (10101010101010101010101010101010101010101010101010101010101010101<<4)
18446744073709551600
@@ -82144,7 +82144,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (10101010101010101010101010101010101010101010101010101010101010101<<8);
0 + (10101010101010101010101010101010101010101010101010101010101010101<<8)
18446744073709551360
@@ -83348,7 +83348,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (10101010101010101010101010101010101010101010101010101010101010101<<16);
0 + (10101010101010101010101010101010101010101010101010101010101010101<<16)
18446744073709486080
@@ -84757,7 +84757,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (10101010101010101010101010101010101010101010101010101010101010101<<32);
0 + (10101010101010101010101010101010101010101010101010101010101010101<<32)
18446744069414584320
@@ -86833,7 +86833,7 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
SELECT 0 + (10101010101010101010101010101010101010101010101010101010101010101<<64);
0 + (10101010101010101010101010101010101010101010101010101010101010101<<64)
0
@@ -90016,4 +90016,4 @@ A A
DELETE t5,t6 FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
diff --git a/mysql-test/suite/engines/iuds/r/update_delete_number.result b/mysql-test/suite/engines/iuds/r/update_delete_number.result
index 8e190870092..15de16ed714 100644
--- a/mysql-test/suite/engines/iuds/r/update_delete_number.result
+++ b/mysql-test/suite/engines/iuds/r/update_delete_number.result
@@ -740,7 +740,7 @@ DELETE FROM a1, a2 USING t1 AS a1 INNER JOIN t2 AS a2 WHERE a2.c1=a1.c2;
SELECT * FROM t1,t2 WHERE t2.c1=t1.c2;
c1 c2 c3 c1 c2 c3
DELETE FROM t1,t2 using t1,t2 where t1.c1=(select c1 from t1);
-ERROR HY000: You can't specify target table 't1' for update in FROM clause
+ERROR HY000: Table 't1' is specified twice, both as a target for 'DELETE' and as a separate source for data
CREATE TABLE t3(c1 INT UNSIGNED NOT NULL PRIMARY KEY, c2 INT SIGNED NULL, c3 INT);
CREATE TABLE t4(c1 INT UNSIGNED, c2 INT);
INSERT INTO t3 VALUES(200,126,1),(250,-127,2);
@@ -980,9 +980,9 @@ drop table mt1, mt2, mt3;
create table mt1 (col1 int);
create table mt2 (col1 int);
update mt1,mt2 set mt1.col1 = (select max(col1) from mt1) where mt1.col1 = mt2.col1;
-ERROR HY000: You can't specify target table 'mt1' for update in FROM clause
+ERROR HY000: Table 'mt1' is specified twice, both as a target for 'UPDATE' and as a separate source for data
delete mt1 from mt1,mt2 where mt1.col1 < (select max(col1) from mt1) and mt1.col1 = mt2.col1;
-ERROR HY000: You can't specify target table 'mt1' for update in FROM clause
+ERROR HY000: Table 'mt1' is specified twice, both as a target for 'DELETE' and as a separate source for data
drop table mt1,mt2;
CREATE TABLE IF NOT EXISTS `mt1` (`id` int(11) NOT NULL auto_increment, `tst` text, `tsmt1` text, PRIMARY KEY (`id`));
CREATE TABLE IF NOT EXISTS `mt2` (`ID` int(11) NOT NULL auto_increment, `ParId` int(11) default NULL, `tst` text, `tsmt1` text, PRIMARY KEY (`ID`), KEY `IX_ParId_mt2` (`ParId`), FOREIGN KEY (`ParId`) REFERENCES `mt1` (`id`));
@@ -1853,7 +1853,7 @@ DELETE FROM a1, a2 USING t1 AS a1 INNER JOIN t2 AS a2 WHERE a2.c1=a1.c2;
SELECT * FROM t1,t2 WHERE t2.c1=t1.c2;
c1 c2 c3 c1 c2 c3
DELETE FROM t1,t2 using t1,t2 where t1.c1=(select c1 from t1);
-ERROR HY000: You can't specify target table 't1' for update in FROM clause
+ERROR HY000: Table 't1' is specified twice, both as a target for 'DELETE' and as a separate source for data
CREATE TABLE t3(c1 TINYINT UNSIGNED NOT NULL PRIMARY KEY, c2 TINYINT SIGNED NULL, c3 INT);
CREATE TABLE t4(c1 TINYINT UNSIGNED, c2 INT);
INSERT INTO t3 VALUES(200,126,1),(250,-127,2);
@@ -2600,7 +2600,7 @@ DELETE FROM a1, a2 USING t1 AS a1 INNER JOIN t2 AS a2 WHERE a2.c1=a1.c2;
SELECT * FROM t1,t2 WHERE t2.c1=t1.c2;
c1 c2 c3 c1 c2 c3
DELETE FROM t1,t2 using t1,t2 where t1.c1=(select c1 from t1);
-ERROR HY000: You can't specify target table 't1' for update in FROM clause
+ERROR HY000: Table 't1' is specified twice, both as a target for 'DELETE' and as a separate source for data
CREATE TABLE t3(c1 SMALLINT UNSIGNED NOT NULL PRIMARY KEY, c2 SMALLINT SIGNED NULL, c3 INT);
CREATE TABLE t4(c1 SMALLINT UNSIGNED, c2 INT);
INSERT INTO t3 VALUES(200,126,1),(250,-127,2);
@@ -3347,7 +3347,7 @@ DELETE FROM a1, a2 USING t1 AS a1 INNER JOIN t2 AS a2 WHERE a2.c1=a1.c2;
SELECT * FROM t1,t2 WHERE t2.c1=t1.c2;
c1 c2 c3 c1 c2 c3
DELETE FROM t1,t2 using t1,t2 where t1.c1=(select c1 from t1);
-ERROR HY000: You can't specify target table 't1' for update in FROM clause
+ERROR HY000: Table 't1' is specified twice, both as a target for 'DELETE' and as a separate source for data
CREATE TABLE t3(c1 MEDIUMINT UNSIGNED NOT NULL PRIMARY KEY, c2 MEDIUMINT SIGNED NULL, c3 INT);
CREATE TABLE t4(c1 MEDIUMINT UNSIGNED, c2 INT);
INSERT INTO t3 VALUES(200,126,1),(250,-127,2);
@@ -4077,7 +4077,7 @@ DELETE FROM a1, a2 USING t1 AS a1 INNER JOIN t2 AS a2 WHERE a2.c1=a1.c2;
SELECT * FROM t1,t2 WHERE t2.c1=t1.c2;
c1 c2 c3 c1 c2 c3
DELETE FROM t1,t2 using t1,t2 where t1.c1=(select c1 from t1);
-ERROR HY000: You can't specify target table 't1' for update in FROM clause
+ERROR HY000: Table 't1' is specified twice, both as a target for 'DELETE' and as a separate source for data
CREATE TABLE t3(c1 BIGINT UNSIGNED NOT NULL PRIMARY KEY, c2 BIGINT SIGNED NULL, c3 INT);
CREATE TABLE t4(c1 BIGINT UNSIGNED, c2 INT);
INSERT INTO t3 VALUES(200,126,1),(250,-127,2);
diff --git a/mysql-test/suite/innodb/r/group_commit_crash.result b/mysql-test/suite/innodb/r/group_commit_crash.result
index 005049df281..5d5dffab33e 100644
--- a/mysql-test/suite/innodb/r/group_commit_crash.result
+++ b/mysql-test/suite/innodb/r/group_commit_crash.result
@@ -1,3 +1,4 @@
+call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed");
CREATE TABLE t1(a CHAR(255),
b CHAR(255),
c CHAR(255),
diff --git a/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result b/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result
index 846500ad2b1..542ce9d496e 100644
--- a/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result
+++ b/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result
@@ -1,3 +1,4 @@
+call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed");
CREATE TABLE t1(a CHAR(255),
b CHAR(255),
c CHAR(255),
diff --git a/mysql-test/suite/innodb/r/innochecksum.result b/mysql-test/suite/innodb/r/innochecksum.result
new file mode 100644
index 00000000000..c75e83e5ed7
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innochecksum.result
@@ -0,0 +1,31 @@
+# Create and populate a table
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB;
+INSERT INTO t1 (b) VALUES ('corrupt me');
+INSERT INTO t1 (b) VALUES ('corrupt me');
+CREATE TABLE t2 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT)
+ROW_FORMAT=COMPRESSED ENGINE=InnoDB ;
+INSERT INTO t2(b) SELECT b from t1;
+CREATE TABLE t3 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT)
+ROW_FORMAT=COMPRESSED ENGINE=InnoDB KEY_BLOCK_SIZE=16;
+INSERT INTO t3(b) SELECT b from t1;
+# Write file to make mysql-test-run.pl expect the "crash", but don't
+# start it until it's told to
+# We give 30 seconds to do a clean shutdown because we do not want
+# to redo apply the pages of t1.ibd at the time of recovery.
+# We want SQL to initiate the first access to t1.ibd.
+# Wait until disconnected.
+# Run innochecksum on t1
+InnoDB offline file checksum utility.
+Table is uncompressed
+Page size is 16384
+# Run innochecksum on t2
+InnoDB offline file checksum utility.
+Table is compressed
+Key block size is 8192
+# Run innochecksum on t3
+InnoDB offline file checksum utility.
+Table is compressed
+Key block size is 16384
+# Write file to make mysql-test-run.pl start up the server again
+# Cleanup
+DROP TABLE t1, t2, t3;
diff --git a/mysql-test/suite/innodb/r/innodb-mdev7046.result b/mysql-test/suite/innodb/r/innodb-mdev7046.result
new file mode 100644
index 00000000000..d00491fd7e5
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb-mdev7046.result
@@ -0,0 +1 @@
+1
diff --git a/mysql-test/suite/innodb/r/innodb-stats-sample.result b/mysql-test/suite/innodb/r/innodb-stats-sample.result
new file mode 100644
index 00000000000..a049a1d82c1
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb-stats-sample.result
@@ -0,0 +1,4 @@
+Variable_name Value
+innodb_stats_sample_pages 1
+Variable_name Value
+innodb_stats_traditional OFF
diff --git a/mysql-test/suite/innodb/r/innodb-wl5522-debug-zip.result b/mysql-test/suite/innodb/r/innodb-wl5522-debug-zip.result
index a4e44be1c72..0e863f5849e 100644
--- a/mysql-test/suite/innodb/r/innodb-wl5522-debug-zip.result
+++ b/mysql-test/suite/innodb/r/innodb-wl5522-debug-zip.result
@@ -104,7 +104,7 @@ restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="-d,ib_import_reset_space_and_lsn_failure";
SET SESSION debug_dbug="+d,ib_import_open_tablespace_failure";
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Got error 44 'Tablespace not found' from ./test_wl5522/t1.ibd
+ERROR HY000: Got error 44 't1.ibd
SET SESSION debug_dbug="-d,ib_import_open_tablespace_failure";
restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,ib_import_check_bitmap_failure";
@@ -537,7 +537,7 @@ ERROR HY000: Tablespace has been discarded for table 't1'
restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,fil_space_create_failure";
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Got error 11 'Generic error' from ./test_wl5522/t1.ibd
+ERROR HY000: Got error 11 't1.ibd
SET SESSION debug_dbug="-d,fil_space_create_failure";
DROP TABLE test_wl5522.t1;
unlink: t1.ibd
@@ -550,7 +550,7 @@ ERROR HY000: Tablespace has been discarded for table 't1'
restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,dict_tf_to_fsp_flags_failure";
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Got error 39 'Data structure corruption' from ./test_wl5522/t1.ibd
+ERROR HY000: Got error 39 't1.ibd
SET SESSION debug_dbug="-d,dict_tf_to_fsp_flags_failure";
DROP TABLE test_wl5522.t1;
unlink: t1.ibd
diff --git a/mysql-test/suite/innodb/r/innodb_bug12400341.result b/mysql-test/suite/innodb/r/innodb_bug12400341.result
index 86eaa83980d..c382bd12616 100644
--- a/mysql-test/suite/innodb/r/innodb_bug12400341.result
+++ b/mysql-test/suite/innodb/r/innodb_bug12400341.result
@@ -1,4 +1,4 @@
-call mtr.add_suppression("InnoDB: Warning: cannot find a free slot for an undo log. Do you have too");
+call mtr.add_suppression("InnoDB: Warning: cannot find a free slot for an undo log. Do you have too*");
set @old_innodb_undo_logs = @@innodb_undo_logs;
set global innodb_undo_logs=1;
show variables like "max_connections";
diff --git a/mysql-test/suite/innodb/r/innodb_corrupt_bit.result b/mysql-test/suite/innodb/r/innodb_corrupt_bit.result
index 8ec10a86b37..353303825e5 100644
--- a/mysql-test/suite/innodb/r/innodb_corrupt_bit.result
+++ b/mysql-test/suite/innodb/r/innodb_corrupt_bit.result
@@ -78,4 +78,5 @@ z
31
32
drop table corrupt_bit_test_Ä;
+DROP DATABASE pad;
SET GLOBAL innodb_change_buffering_debug = 0;
diff --git a/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result b/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result
index 47c714bb0a6..c351b222496 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result
@@ -17,4 +17,16 @@ avg_row_length 0
max_data_length 0
index_length 0
ALTER TABLE mysql.innodb_index_stats_ RENAME TO mysql.innodb_index_stats;
+SELECT seq_in_index, column_name, cardinality
+FROM information_schema.statistics WHERE table_name = 'test_ps_create_on_corrupted'
+ORDER BY index_name, seq_in_index;
+seq_in_index 1
+column_name a
+cardinality 0
+SELECT table_rows, avg_row_length, max_data_length, index_length
+FROM information_schema.tables WHERE table_name = 'test_ps_create_on_corrupted';
+table_rows 0
+avg_row_length 0
+max_data_length 0
+index_length 0
DROP TABLE test_ps_create_on_corrupted;
diff --git a/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result b/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result
index 4d1b8817bf1..befbb709c19 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result
@@ -1,3 +1,4 @@
+call mtr.add_suppression("InnoDB: Error: Fetch of persistent statistics requested for table*");
CREATE TABLE test_ps_fetch_nonexistent
(a INT, PRIMARY KEY (a))
ENGINE=INNODB STATS_PERSISTENT=1;
diff --git a/mysql-test/suite/innodb/r/insert_debug.result b/mysql-test/suite/innodb/r/insert_debug.result
new file mode 100644
index 00000000000..0d176afa116
--- /dev/null
+++ b/mysql-test/suite/innodb/r/insert_debug.result
@@ -0,0 +1,11 @@
+#
+# Bug#19904003 INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=1
+# CAUSES INFINITE PAGE SPLIT
+#
+SET GLOBAL innodb_change_buffering_debug=1;
+SET GLOBAL innodb_limit_optimistic_insert_debug=1;
+CREATE TABLE t1(c1 INT PRIMARY KEY) ENGINE=InnoDB
+PARTITION BY HASH (c1) PARTITIONS 15;
+DROP TABLE t1;
+SET GLOBAL innodb_change_buffering_debug=0;
+SET GLOBAL innodb_limit_optimistic_insert_debug=0;
diff --git a/mysql-test/suite/innodb/r/multi_repair-7404.result b/mysql-test/suite/innodb/r/multi_repair-7404.result
new file mode 100644
index 00000000000..b3db5755b87
--- /dev/null
+++ b/mysql-test/suite/innodb/r/multi_repair-7404.result
@@ -0,0 +1,21 @@
+create table `t1`(`a` int) engine=innodb partition by key (`a`);
+create table `t2`(`b` int) engine=innodb;
+create table `t3`(`c` int) engine=innodb;
+insert t1 values (1);
+insert t2 values (2);
+insert t3 values (3);
+repair table `t1`,`t2`,`t3`;
+Table Op Msg_type Msg_text
+test.t1 repair status OK
+test.t2 repair status OK
+test.t3 repair status OK
+select * from t1;
+a
+1
+select * from t2;
+b
+2
+select * from t3;
+c
+3
+drop table t1, t2, t3;
diff --git a/mysql-test/suite/innodb/r/sp_temp_table.result b/mysql-test/suite/innodb/r/sp_temp_table.result
new file mode 100644
index 00000000000..49a2a4aa831
--- /dev/null
+++ b/mysql-test/suite/innodb/r/sp_temp_table.result
@@ -0,0 +1,253 @@
+#
+# Bug #19306524 FAILING ASSERTION WITH TEMP TABLE FOR A PROCEDURE
+# CALLED FROM A FUNCTION
+#
+call mtr.add_suppression("MySQL is trying to drop table");
+CREATE PROCEDURE cachedata(
+IN obj_id BIGINT UNSIGNED,
+IN start DATETIME,
+IN end DATETIME
+)
+cachedata:BEGIN
+DECLARE cache_count BIGINT;
+SET @timestamp := NOW();
+CREATE TEMPORARY TABLE IF NOT EXISTS cachedata (
+timestamp DATETIME,
+object_id BIGINT UNSIGNED NOT NULL,
+start DATETIME,
+end DATETIME,
+seqno BIGINT AUTO_INCREMENT,
+value FLOAT,
+PRIMARY KEY (seqno),
+INDEX (timestamp),
+INDEX (object_id, start, end)
+) ENGINE=INNODB;
+DELETE FROM cachedata WHERE
+timestamp < DATE_SUB(@timestamp, INTERVAL 15 SECOND);
+SELECT count(*) INTO cache_count FROM cachedata WHERE
+object_id = obj_id
+AND start = start
+AND end = end;
+IF cache_count > 0 THEN LEAVE cachedata;
+END IF;
+INSERT INTO cachedata (timestamp, object_id, start, end, value) VALUES
+(@timestamp, obj_id, start, end, 1234),
+(@timestamp, obj_id, start, end, 4567),
+(@timestamp, obj_id, start, end, 8901),
+(@timestamp, obj_id, start, end, 1234),
+(@timestamp, obj_id, start, end, 4567),
+(@timestamp, obj_id, start, end, 8901),
+(@timestamp, obj_id, start, end, 1234),
+(@timestamp, obj_id, start, end, 4567),
+(@timestamp, obj_id, start, end, 8901),
+(@timestamp, obj_id, start, end, 1234),
+(@timestamp, obj_id, start, end, 4567),
+(@timestamp, obj_id, start, end, 8901),
+(@timestamp, obj_id, start, end, 2345),
+(@timestamp, obj_id, start, end, 1234),
+(@timestamp, obj_id, start, end, 4567),
+(@timestamp, obj_id, start, end, 8901),
+(@timestamp, obj_id, start, end, 2345),
+(@timestamp, obj_id, start, end, 1234),
+(@timestamp, obj_id, start, end, 4567),
+(@timestamp, obj_id, start, end, 8901),
+(@timestamp, obj_id, start, end, 2345);
+END$$
+CREATE FUNCTION get_cache(
+obj_id BIGINT UNSIGNED,
+start DATETIME,
+end DATETIME
+)
+RETURNS FLOAT
+READS SQL DATA
+BEGIN
+DECLARE result FLOAT;
+CALL cachedata(obj_id, start, end);
+SELECT SUM(value) INTO result FROM cachedata WHERE
+object_id = obj_id
+AND start = start
+AND end = end;
+RETURN result;
+END$$
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+SELECT get_cache(1, '2014-01-01', '2014-02-01');
+get_cache(1, '2014-01-01', '2014-02-01')
+95247
+select sleep(1);
+sleep(1)
+0
+DROP FUNCTION get_cache;
+DROP PROCEDURE cachedata;
diff --git a/mysql-test/suite/innodb/r/strict_mode.result b/mysql-test/suite/innodb/r/strict_mode.result
new file mode 100644
index 00000000000..d6a621212c3
--- /dev/null
+++ b/mysql-test/suite/innodb/r/strict_mode.result
@@ -0,0 +1,242 @@
+#
+# Bug #17852083 PRINT A WARNING WHEN DDL HAS AN ERROR IN
+# INNODB_STRICT_MODE = 1
+#
+set innodb_strict_mode = 0;
+create table t1 (id int auto_increment primary key,
+v varchar(32),
+col1 text,
+col2 text,
+col3 text,
+col4 text,
+col5 text,
+col6 text,
+col7 text,
+col8 text,
+col9 text,
+col10 text,
+col11 text,
+col12 text,
+col13 text,
+col14 text,
+col15 text,
+col16 text,
+col17 text,
+col18 text,
+col19 text,
+col20 text,
+col21 text,
+col22 text,
+col23 text,
+col24 text,
+col25 text,
+col26 text,
+col27 text,
+col28 text,
+col29 text,
+col30 text,
+col31 text,
+col32 text,
+col33 text,
+col34 text,
+col35 text,
+col36 text,
+col37 text,
+col38 text,
+col39 text,
+col40 text,
+col41 text,
+col42 text,
+col43 text,
+col44 text,
+col45 text ,
+col46 text,
+col47 text,
+col48 text,
+col49 text,
+col50 text,
+col51 text,
+col52 text,
+col53 text,
+col54 text,
+col55 text,
+col56 text,
+col57 text,
+col58 text,
+col59 text,
+col60 text,
+col61 text,
+col62 text,
+col63 text,
+col64 text,
+col65 text,
+col66 text,
+col67 text,
+col68 text ,
+col69 text,
+col70 text,
+col71 text,
+col72 text,
+col73 text,
+col74 text,
+col75 text,
+col76 text,
+col77 text,
+col78 text,
+col79 text,
+col80 text,
+col81 text,
+col82 text,
+col83 text,
+col84 text,
+col85 text,
+col86 text,
+col87 text,
+col88 text,
+col89 text,
+col90 text,
+col91 text,
+col92 text,
+col93 text,
+col94 text,
+col95 text,
+col96 text,
+col97 text,
+col98 text,
+col99 text,
+col100 text,
+col101 text,
+col102 text,
+col103 text,
+col104 text,
+col105 text,
+col106 text,
+col107 text,
+col108 text,
+col109 text,
+col110 text,
+col111 text,
+col112 text,
+col113 text,
+col114 text,
+col115 text,
+col116 text,
+col117 text,
+col118 text,
+col119 text,
+col120 text,
+col121 text,
+col122 text,
+col123 text,
+col124 text,
+col125 text,
+col126 text ,
+col127 text,
+col128 text,
+col129 text,
+col130 text,
+col131 text,
+col132 text,
+col133 text,
+col134 text,
+col135 text,
+col136 text,
+col137 text,
+col138 text,
+col139 text,
+col140 text,
+col141 text,
+col142 text,
+col143 text,
+col144 text,
+col145 text,
+col146 text,
+col147 text ,
+col148 text,
+col149 text,
+col150 text,
+col151 text,
+col152 text,
+col153 text,
+col154 text,
+col155 text,
+col156 text,
+col157 text,
+col158 text,
+col159 text,
+col160 text,
+col161 text,
+col162 text,
+col163 text,
+col164 text,
+col165 text,
+col166 text,
+col167 text,
+col168 text,
+col169 text,
+col170 text,
+col171 text,
+col172 text ,
+col173 text,
+col174 text,
+col175 text,
+col176 text,
+col177 text,
+col178 text,
+col179 text,
+col180 text,
+col181 text,
+col182 text,
+col183 text,
+col184 text,
+col185 text,
+col186 text,
+col187 text,
+col188 text,
+col189 text,
+col190 text,
+col191 text,
+col192 text,
+col193 text,
+col194 text,
+col195 text,
+col196 text,
+col197 text,
+col198 text,
+col199 text,
+col200 text,
+col201 text,
+col202 text,
+col203 text,
+col204 text,
+col205 text,
+col206 text,
+col207 text,
+col208 text,
+col209 text,
+col210 text,
+col211 text,
+col212 text,
+col213 text,
+col214 text,
+col215 text,
+col216 text,
+col217 text,
+col218 text,
+col219 text,
+col220 text,
+col221 text,
+col222 text,
+col223 text,
+col224 text,
+col225 text,
+col226 text,
+col227 text,
+col228 text
+) ENGINE=InnoDB;
+Warnings:
+Warning 139 Row size too large (> 8126). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline.
+set innodb_strict_mode = 1;
+alter table t1 engine=InnoDB;
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+drop table t1;
diff --git a/mysql-test/suite/innodb/t/group_commit_crash.test b/mysql-test/suite/innodb/t/group_commit_crash.test
index 7ad0d9d1e74..cad349819bd 100644
--- a/mysql-test/suite/innodb/t/group_commit_crash.test
+++ b/mysql-test/suite/innodb/t/group_commit_crash.test
@@ -9,6 +9,8 @@
--source include/have_debug.inc
--source include/have_log_bin.inc
+call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed");
+
let $file_format_max=`SELECT @@innodb_file_format_max`;
CREATE TABLE t1(a CHAR(255),
b CHAR(255),
diff --git a/mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test b/mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test
index 9dc2557e687..8d1f460b64b 100644
--- a/mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test
+++ b/mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test
@@ -9,6 +9,8 @@
--source include/have_debug.inc
--source include/have_log_bin.inc
+call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed");
+
let $file_format_max=`SELECT @@innodb_file_format_max`;
CREATE TABLE t1(a CHAR(255),
b CHAR(255),
diff --git a/mysql-test/suite/innodb/t/innochecksum.opt b/mysql-test/suite/innodb/t/innochecksum.opt
new file mode 100644
index 00000000000..cc738d97434
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innochecksum.opt
@@ -0,0 +1,2 @@
+--innodb_file_per_table=1
+--innodb_file_format=Barracuda
diff --git a/mysql-test/suite/innodb/t/innochecksum.test b/mysql-test/suite/innodb/t/innochecksum.test
new file mode 100644
index 00000000000..34df2801880
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innochecksum.test
@@ -0,0 +1,70 @@
+#
+# Test innochecksum
+#
+
+# Don't test under embedded
+source include/not_embedded.inc;
+# Require InnoDB
+source include/have_innodb.inc;
+
+if (!$INNOCHECKSUM) {
+ --echo Need innochecksum binary
+ --die Need innochecksum binary
+}
+
+--echo # Create and populate a table
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB;
+INSERT INTO t1 (b) VALUES ('corrupt me');
+--disable_query_log
+--let $i = 1000
+while ($i)
+{
+ INSERT INTO t1 (b) VALUES (REPEAT('abcdefghijklmnopqrstuvwxyz', 100));
+ dec $i;
+}
+--enable_query_log
+INSERT INTO t1 (b) VALUES ('corrupt me');
+
+CREATE TABLE t2 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT)
+ROW_FORMAT=COMPRESSED ENGINE=InnoDB ;
+
+INSERT INTO t2(b) SELECT b from t1;
+
+CREATE TABLE t3 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT)
+ROW_FORMAT=COMPRESSED ENGINE=InnoDB KEY_BLOCK_SIZE=16;
+
+INSERT INTO t3(b) SELECT b from t1;
+
+let $MYSQLD_DATADIR=`select @@datadir`;
+let t1_IBD = $MYSQLD_DATADIR/test/t1.ibd;
+let t2_IBD = $MYSQLD_DATADIR/test/t2.ibd;
+let t3_IBD = $MYSQLD_DATADIR/test/t3.ibd;
+
+--echo # Write file to make mysql-test-run.pl expect the "crash", but don't
+--echo # start it until it's told to
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+
+--echo # We give 30 seconds to do a clean shutdown because we do not want
+--echo # to redo apply the pages of t1.ibd at the time of recovery.
+--echo # We want SQL to initiate the first access to t1.ibd.
+shutdown_server 30;
+
+--echo # Wait until disconnected.
+--source include/wait_until_disconnected.inc
+
+--echo # Run innochecksum on t1
+--exec $INNOCHECKSUM $t1_IBD
+
+--echo # Run innochecksum on t2
+--exec $INNOCHECKSUM $t2_IBD
+
+--echo # Run innochecksum on t3
+--exec $INNOCHECKSUM $t3_IBD
+
+--echo # Write file to make mysql-test-run.pl start up the server again
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+--echo # Cleanup
+DROP TABLE t1, t2, t3;
diff --git a/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test
index 4e3a7bfdae6..adeb2ef9fd2 100644
--- a/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test
+++ b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test
@@ -1,5 +1,8 @@
# MDEV-6288: Innodb causes server crash after disk full, then can't ALTER TABLE any more
--source include/have_innodb.inc
+--source include/not_windows.inc
+--source include/not_valgrind.inc
+--source include/not_embedded.inc
# DEBUG_SYNC must be compiled in.
--source include/have_debug_sync.inc
diff --git a/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test b/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test
index 1774bb7f796..8cbe4938cab 100644
--- a/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test
+++ b/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test
@@ -14,6 +14,8 @@ if (`select plugin_auth_version < "5.6.17" from information_schema.plugins where
--source include/not_embedded.inc
# DBUG_SUICIDE() hangs under valgrind
--source include/not_valgrind.inc
+# No windows, need perl
+--source include/not_windows.inc
# The flag innodb_change_buffering_debug is only available in debug builds.
# It instructs InnoDB to try to evict pages from the buffer pool when
diff --git a/mysql-test/suite/innodb/t/innodb-mdev7046.test b/mysql-test/suite/innodb/t/innodb-mdev7046.test
new file mode 100644
index 00000000000..4033f284e65
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innodb-mdev7046.test
@@ -0,0 +1,48 @@
+--source include/have_innodb.inc
+--source include/have_partition.inc
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+
+
+# Ignore OS errors
+call mtr.add_suppression("InnoDB: File ./test/t1*");
+call mtr.add_suppression("InnoDB: Error number*");
+call mtr.add_suppression("InnoDB: File ./test/t1#p#p1#sp#p1sp0.ibd: 'rename' returned OS error*");
+
+# MDEV-7046: MySQL#74480 - Failing assertion: os_file_status(newpath, &exists, &type)
+# after Operating system error number 36 in a file operation
+
+USE test;
+create table t1(f1 INT,f2 INT,f3 CHAR (10),primary key(f1,f2)) partition by range(f1) subpartition by hash(f2) subpartitions 2 (partition p1 values less than (0),partition p2 values less than (2),partition p3 values less than (2147483647));
+--replace_regex /'.*t2_new.*'/'t2_new'/
+--error 7
+RENAME TABLE t1 TO `t2_new..............................................end`;
+alter table t1 engine=innodb;
+--replace_regex /'.*t2_new.*'/'t2_new'/
+--error 1025
+RENAME TABLE t1 TO `t2_new..............................................end`;
+--replace_regex /'.*t2_new.*'/'t2_new'/
+show warnings;
+drop table t1;
+
+DROP DATABASE test;CREATE DATABASE test;USE test;
+SET @@session.storage_engine=MYISAM;
+--error 0,1,1103
+CREATE TABLE t1(id INT,purchased DATE)PARTITION BY RANGE(YEAR(purchased)) SUBPARTITION BY HASH(TO_DAYS(purchased)) SUBPARTITIONS 2 (PARTITION p0 VALUES LESS THAN MAXVALUE (SUBPARTITION sp0 DATA DIRECTORY='/tmp/not-existing' INDEX DIRECTORY='/tmp/not-existing',SUBPARTITION sp1));
+drop table if exists t1;
+CREATE TABLE t1(id INT,purchased DATE)PARTITION BY RANGE(YEAR(purchased)) SUBPARTITION BY HASH(TO_DAYS(purchased)) SUBPARTITIONS 2 (PARTITION p0 VALUES LESS THAN MAXVALUE (SUBPARTITION sp0,SUBPARTITION sp1));
+ALTER TABLE t1 ENGINE=InnoDB;
+
+drop table t1;
+
+let $datadir=`select @@datadir`;
+--remove_file $datadir/test/db.opt
+
+--enable_query_log
+--enable_result_log
+--enable_warnings
+
+# make sure that we have at least some output to avoid mtr warning
+--echo 1
diff --git a/mysql-test/suite/innodb/t/innodb-stats-sample.test b/mysql-test/suite/innodb/t/innodb-stats-sample.test
new file mode 100644
index 00000000000..35d35bfa382
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innodb-stats-sample.test
@@ -0,0 +1,78 @@
+--source include/have_innodb.inc
+#
+# Test that mysqld does not crash when running ANALYZE TABLE with
+# different values of the parameter innodb_stats_sample_pages.
+#
+
+# we care only that the following SQL commands do not produce errors
+# and do not crash the server
+-- disable_query_log
+-- disable_result_log
+-- enable_warnings
+
+let $sample_pages=`select @@innodb_stats_sample_pages`;
+let $traditional=`select @@innodb_stats_traditional`;
+SET GLOBAL innodb_stats_sample_pages=0;
+#use new method to calculate statistics
+SET GLOBAL innodb_stats_traditional=0;
+
+# check that the value has been adjusted to 1
+-- enable_result_log
+SHOW VARIABLES LIKE 'innodb_stats_sample_pages';
+SHOW VARIABLES LIKE 'innodb_stats_traditional';
+-- disable_result_log
+
+CREATE TABLE innodb_analyze (
+ a INT,
+ b INT,
+ c char(50),
+ KEY(a),
+ KEY(b,a)
+) ENGINE=InnoDB;
+
+# test with empty table
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=2;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=1;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=8000;
+ANALYZE TABLE innodb_analyze;
+
+delimiter //;
+create procedure innodb_insert_proc (repeat_count int)
+begin
+ declare current_num int;
+ set current_num = 0;
+ while current_num < repeat_count do
+ insert into innodb_analyze values(current_num, current_num*100,substring(MD5(RAND()), -44));
+ set current_num = current_num + 1;
+ end while;
+end//
+delimiter ;//
+commit;
+
+set autocommit=0;
+call innodb_insert_proc(7000);
+commit;
+set autocommit=1;
+
+SET GLOBAL innodb_stats_sample_pages=1;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=8;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=16;
+ANALYZE TABLE innodb_analyze;
+
+SET GLOBAL innodb_stats_sample_pages=8000;
+ANALYZE TABLE innodb_analyze;
+
+DROP PROCEDURE innodb_insert_proc;
+DROP TABLE innodb_analyze;
+EVAL SET GLOBAL innodb_stats_sample_pages=$sample_pages;
+EVAL SET GLOBAL innodb_stats_traditional=$traditional; \ No newline at end of file
diff --git a/mysql-test/suite/innodb/t/innodb-wl5522-debug-zip.test b/mysql-test/suite/innodb/t/innodb-wl5522-debug-zip.test
index 1290b9b5bb7..4b03ac008d2 100644
--- a/mysql-test/suite/innodb/t/innodb-wl5522-debug-zip.test
+++ b/mysql-test/suite/innodb/t/innodb-wl5522-debug-zip.test
@@ -22,7 +22,7 @@ let MYSQLD_DATADIR =`SELECT @@datadir`;
let $innodb_file_per_table = `SELECT @@innodb_file_per_table`;
let $innodb_file_format = `SELECT @@innodb_file_format`;
let $innodb_strict_mode_orig=`select @@session.innodb_strict_mode`;
-let $pathfix=/: '.*test_wl5522.*t1.ibd'/: 'test_wl5522\\t1.ibd'/;
+let $pathfix=/: '.*test_wl5522.*t1.ibd'/: 'test_wl5522_t1.ibd'/;
SET GLOBAL innodb_file_per_table = 1;
SELECT @@innodb_file_per_table;
@@ -233,8 +233,7 @@ SET SESSION debug_dbug="-d,ib_import_reset_space_and_lsn_failure";
# Test failure after attempting a tablespace open
SET SESSION debug_dbug="+d,ib_import_open_tablespace_failure";
---replace_regex /file: '.*t1.ibd'/'t1.ibd'/
-
+--replace_regex /'.*[\/\\]/'/
--error ER_GET_ERRMSG
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
@@ -637,8 +636,7 @@ EOF
SET SESSION debug_dbug="+d,fil_space_create_failure";
---replace_regex $pathfix
-
+--replace_regex /'.*[\/\\]/'/
--error ER_GET_ERRMSG
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
@@ -669,8 +667,7 @@ EOF
SET SESSION debug_dbug="+d,dict_tf_to_fsp_flags_failure";
---replace_regex $pathfix
-
+--replace_regex /'.*[\/\\]/'/
--error ER_GET_ERRMSG
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
diff --git a/mysql-test/suite/innodb/t/innodb_bug12400341.test b/mysql-test/suite/innodb/t/innodb_bug12400341.test
index 173d42665be..165e19b7cbc 100644
--- a/mysql-test/suite/innodb/t/innodb_bug12400341.test
+++ b/mysql-test/suite/innodb/t/innodb_bug12400341.test
@@ -10,8 +10,13 @@ if (`select count(*)=0 from information_schema.global_variables where variable_n
# Don't test under valgrind, undo slots of the previous test might exist still
# and cause unstable result.
--source include/not_valgrind.inc
+# undo slots of the previous test might exist still
+--source include/not_windows.inc
-call mtr.add_suppression("InnoDB: Warning: cannot find a free slot for an undo log. Do you have too");
+# Previous undo slots cause unnecessary failures
+--source include/not_windows.inc
+
+call mtr.add_suppression("InnoDB: Warning: cannot find a free slot for an undo log. Do you have too*");
--disable_query_log
set @old_innodb_trx_rseg_n_slots_debug = @@innodb_trx_rseg_n_slots_debug;
diff --git a/mysql-test/suite/innodb/t/innodb_bug14147491.test b/mysql-test/suite/innodb/t/innodb_bug14147491.test
index 050f7fbdd73..6f0bfca8e1d 100644
--- a/mysql-test/suite/innodb/t/innodb_bug14147491.test
+++ b/mysql-test/suite/innodb/t/innodb_bug14147491.test
@@ -12,6 +12,9 @@ source include/not_embedded.inc;
source include/have_innodb.inc;
# Require Debug for SET DEBUG
source include/have_debug.inc;
+# Test could open crash reporter on Windows
+# if compiler set up
+source include/not_windows.inc;
CALL mtr.add_suppression("InnoDB: Error: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts");
CALL mtr.add_suppression("InnoDB: Warning: database page corruption or a failed");
diff --git a/mysql-test/suite/innodb/t/innodb_corrupt_bit.test b/mysql-test/suite/innodb/t/innodb_corrupt_bit.test
index c57e52b65cc..12c560934ef 100644
--- a/mysql-test/suite/innodb/t/innodb_corrupt_bit.test
+++ b/mysql-test/suite/innodb/t/innodb_corrupt_bit.test
@@ -4,6 +4,11 @@
-- source include/have_innodb.inc
-- source include/have_innodb_16k.inc
+if (`select plugin_auth_version <= "5.5.40-MariaDB-36.1" from information_schema.plugins where plugin_name='innodb'`)
+{
+ --skip Not fixed in XtraDB as of 5.5.40-MariaDB-36.1 or earlier
+}
+
# Issues with innodb_change_buffering_debug on Windows, so the test scenario
# cannot be created on windows
--source include/not_windows.inc
@@ -18,14 +23,20 @@ call mtr.add_suppression("Flagged corruption of idx.*in CHECK TABLE");
# It instructs InnoDB to try to evict pages from the buffer pool when
# change buffering is possible, so that the change buffer will be used
# whenever possible.
--- error 0,ER_UNKNOWN_SYSTEM_VARIABLE
SET @innodb_change_buffering_debug_orig = @@innodb_change_buffering_debug;
--- error 0,ER_UNKNOWN_SYSTEM_VARIABLE
SET GLOBAL innodb_change_buffering_debug = 1;
# Turn off Unique Check to create corrupted index with dup key
SET UNIQUE_CHECKS=0;
+CREATE DATABASE pad;
+let $i=345;
+while ($i)
+{
+ --eval CREATE TABLE pad.t$i (a INT PRIMARY KEY) ENGINE=InnoDB;
+ dec $i;
+}
+
-- enable_query_log
set names utf8;
@@ -121,6 +132,6 @@ select z from corrupt_bit_test_Ä limit 10;
# Drop table
drop table corrupt_bit_test_Ä;
+DROP DATABASE pad;
--- error 0, ER_UNKNOWN_SYSTEM_VARIABLE
SET GLOBAL innodb_change_buffering_debug = 0;
diff --git a/mysql-test/suite/innodb/t/innodb_stats_create_on_corrupted.test b/mysql-test/suite/innodb/t/innodb_stats_create_on_corrupted.test
index 78c9334f800..de6026a23aa 100644
--- a/mysql-test/suite/innodb/t/innodb_stats_create_on_corrupted.test
+++ b/mysql-test/suite/innodb/t/innodb_stats_create_on_corrupted.test
@@ -33,4 +33,16 @@ FROM information_schema.tables WHERE table_name = 'test_ps_create_on_corrupted';
# restore the persistent storage
ALTER TABLE mysql.innodb_index_stats_ RENAME TO mysql.innodb_index_stats;
+--source include/restart_mysqld.inc
+
+-- vertical_results
+
+# check again
+SELECT seq_in_index, column_name, cardinality
+FROM information_schema.statistics WHERE table_name = 'test_ps_create_on_corrupted'
+ORDER BY index_name, seq_in_index;
+
+SELECT table_rows, avg_row_length, max_data_length, index_length
+FROM information_schema.tables WHERE table_name = 'test_ps_create_on_corrupted';
+
DROP TABLE test_ps_create_on_corrupted;
diff --git a/mysql-test/suite/innodb/t/innodb_stats_fetch_nonexistent.test b/mysql-test/suite/innodb/t/innodb_stats_fetch_nonexistent.test
index 6c7365c52d1..dbafb33965c 100644
--- a/mysql-test/suite/innodb/t/innodb_stats_fetch_nonexistent.test
+++ b/mysql-test/suite/innodb/t/innodb_stats_fetch_nonexistent.test
@@ -4,6 +4,8 @@
-- source include/have_innodb.inc
+call mtr.add_suppression("InnoDB: Error: Fetch of persistent statistics requested for table*");
+
-- vertical_results
CREATE TABLE test_ps_fetch_nonexistent
diff --git a/mysql-test/suite/innodb/t/insert_debug.test b/mysql-test/suite/innodb/t/insert_debug.test
new file mode 100644
index 00000000000..666b634bef9
--- /dev/null
+++ b/mysql-test/suite/innodb/t/insert_debug.test
@@ -0,0 +1,22 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_partition.inc
+
+if (`select plugin_auth_version < "5.6.22" from information_schema.plugins where plugin_name='innodb'`)
+{
+ --skip Not fixed in InnoDB/XtraDB as of 5.6.21 or earlier
+}
+
+--echo #
+--echo # Bug#19904003 INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=1
+--echo # CAUSES INFINITE PAGE SPLIT
+--echo #
+
+SET GLOBAL innodb_change_buffering_debug=1;
+SET GLOBAL innodb_limit_optimistic_insert_debug=1;
+CREATE TABLE t1(c1 INT PRIMARY KEY) ENGINE=InnoDB
+PARTITION BY HASH (c1) PARTITIONS 15;
+DROP TABLE t1;
+
+SET GLOBAL innodb_change_buffering_debug=0;
+SET GLOBAL innodb_limit_optimistic_insert_debug=0;
diff --git a/mysql-test/suite/innodb/t/multi_repair-7404.test b/mysql-test/suite/innodb/t/multi_repair-7404.test
new file mode 100644
index 00000000000..0775cd8b200
--- /dev/null
+++ b/mysql-test/suite/innodb/t/multi_repair-7404.test
@@ -0,0 +1,18 @@
+#
+# MDEV-7404 REPAIR multiple tables crash in MDL_ticket::has_stronger_or_equal_type
+#
+
+--source include/have_partition.inc
+--source include/have_innodb.inc
+create table `t1`(`a` int) engine=innodb partition by key (`a`);
+create table `t2`(`b` int) engine=innodb;
+create table `t3`(`c` int) engine=innodb;
+insert t1 values (1);
+insert t2 values (2);
+insert t3 values (3);
+repair table `t1`,`t2`,`t3`;
+select * from t1;
+select * from t2;
+select * from t3;
+drop table t1, t2, t3;
+
diff --git a/mysql-test/suite/innodb/t/sp_temp_table.test b/mysql-test/suite/innodb/t/sp_temp_table.test
new file mode 100644
index 00000000000..9a6be85fd7e
--- /dev/null
+++ b/mysql-test/suite/innodb/t/sp_temp_table.test
@@ -0,0 +1,108 @@
+--source include/have_innodb.inc
+--source include/big_test.inc
+
+if (`select plugin_auth_version < "5.6.22" from information_schema.plugins where plugin_name='innodb'`)
+{
+ --skip Not fixed in InnoDB/XtraDB as of 5.6.21 or earlier
+}
+
+--echo #
+--echo # Bug #19306524 FAILING ASSERTION WITH TEMP TABLE FOR A PROCEDURE
+--echo # CALLED FROM A FUNCTION
+--echo #
+
+call mtr.add_suppression("MySQL is trying to drop table");
+
+DELIMITER $$;
+CREATE PROCEDURE cachedata(
+ IN obj_id BIGINT UNSIGNED,
+ IN start DATETIME,
+ IN end DATETIME
+)
+
+cachedata:BEGIN
+ DECLARE cache_count BIGINT;
+
+ SET @timestamp := NOW();
+
+ CREATE TEMPORARY TABLE IF NOT EXISTS cachedata (
+ timestamp DATETIME,
+ object_id BIGINT UNSIGNED NOT NULL,
+ start DATETIME,
+ end DATETIME,
+ seqno BIGINT AUTO_INCREMENT,
+ value FLOAT,
+ PRIMARY KEY (seqno),
+ INDEX (timestamp),
+ INDEX (object_id, start, end)
+ ) ENGINE=INNODB;
+
+ DELETE FROM cachedata WHERE
+ timestamp < DATE_SUB(@timestamp, INTERVAL 15 SECOND);
+
+ SELECT count(*) INTO cache_count FROM cachedata WHERE
+ object_id = obj_id
+ AND start = start
+ AND end = end;
+
+ IF cache_count > 0 THEN LEAVE cachedata;
+ END IF;
+
+ INSERT INTO cachedata (timestamp, object_id, start, end, value) VALUES
+ (@timestamp, obj_id, start, end, 1234),
+ (@timestamp, obj_id, start, end, 4567),
+ (@timestamp, obj_id, start, end, 8901),
+ (@timestamp, obj_id, start, end, 1234),
+ (@timestamp, obj_id, start, end, 4567),
+ (@timestamp, obj_id, start, end, 8901),
+ (@timestamp, obj_id, start, end, 1234),
+ (@timestamp, obj_id, start, end, 4567),
+ (@timestamp, obj_id, start, end, 8901),
+ (@timestamp, obj_id, start, end, 1234),
+ (@timestamp, obj_id, start, end, 4567),
+ (@timestamp, obj_id, start, end, 8901),
+ (@timestamp, obj_id, start, end, 2345),
+ (@timestamp, obj_id, start, end, 1234),
+ (@timestamp, obj_id, start, end, 4567),
+ (@timestamp, obj_id, start, end, 8901),
+ (@timestamp, obj_id, start, end, 2345),
+ (@timestamp, obj_id, start, end, 1234),
+ (@timestamp, obj_id, start, end, 4567),
+ (@timestamp, obj_id, start, end, 8901),
+ (@timestamp, obj_id, start, end, 2345);
+
+END$$
+
+
+CREATE FUNCTION get_cache(
+ obj_id BIGINT UNSIGNED,
+ start DATETIME,
+ end DATETIME
+)
+ RETURNS FLOAT
+ READS SQL DATA
+BEGIN
+ DECLARE result FLOAT;
+
+ CALL cachedata(obj_id, start, end);
+
+ SELECT SUM(value) INTO result FROM cachedata WHERE
+ object_id = obj_id
+ AND start = start
+ AND end = end;
+
+ RETURN result;
+END$$
+
+DELIMITER ;$$
+
+let $i = 30;
+while ($i)
+{
+ SELECT get_cache(1, '2014-01-01', '2014-02-01');
+ select sleep(1);
+ dec $i;
+}
+
+DROP FUNCTION get_cache;
+DROP PROCEDURE cachedata;
diff --git a/mysql-test/suite/innodb/t/strict_mode.test b/mysql-test/suite/innodb/t/strict_mode.test
new file mode 100644
index 00000000000..9b115091f84
--- /dev/null
+++ b/mysql-test/suite/innodb/t/strict_mode.test
@@ -0,0 +1,251 @@
+--source include/have_innodb.inc
+
+if (`select plugin_auth_version <= "5.5.40-MariaDB-36.1" from information_schema.plugins where plugin_name='innodb'`)
+{
+ --skip Not fixed in XtraDB as of 5.5.40-MariaDB-36.1 or earlier
+}
+
+--echo #
+--echo # Bug #17852083 PRINT A WARNING WHEN DDL HAS AN ERROR IN
+--echo # INNODB_STRICT_MODE = 1
+--echo #
+
+set innodb_strict_mode = 0;
+
+create table t1 (id int auto_increment primary key,
+v varchar(32),
+col1 text,
+col2 text,
+col3 text,
+col4 text,
+col5 text,
+col6 text,
+col7 text,
+col8 text,
+col9 text,
+col10 text,
+col11 text,
+col12 text,
+col13 text,
+col14 text,
+col15 text,
+col16 text,
+col17 text,
+col18 text,
+col19 text,
+col20 text,
+col21 text,
+col22 text,
+col23 text,
+col24 text,
+col25 text,
+col26 text,
+col27 text,
+col28 text,
+col29 text,
+col30 text,
+col31 text,
+col32 text,
+col33 text,
+col34 text,
+col35 text,
+col36 text,
+col37 text,
+col38 text,
+col39 text,
+col40 text,
+col41 text,
+col42 text,
+col43 text,
+col44 text,
+col45 text ,
+col46 text,
+col47 text,
+col48 text,
+col49 text,
+col50 text,
+col51 text,
+col52 text,
+col53 text,
+col54 text,
+col55 text,
+col56 text,
+col57 text,
+col58 text,
+col59 text,
+col60 text,
+col61 text,
+col62 text,
+col63 text,
+col64 text,
+col65 text,
+col66 text,
+col67 text,
+col68 text ,
+col69 text,
+col70 text,
+col71 text,
+col72 text,
+col73 text,
+col74 text,
+col75 text,
+col76 text,
+col77 text,
+col78 text,
+col79 text,
+col80 text,
+col81 text,
+col82 text,
+col83 text,
+col84 text,
+col85 text,
+col86 text,
+col87 text,
+col88 text,
+col89 text,
+col90 text,
+col91 text,
+col92 text,
+col93 text,
+col94 text,
+col95 text,
+col96 text,
+col97 text,
+col98 text,
+col99 text,
+col100 text,
+col101 text,
+col102 text,
+col103 text,
+col104 text,
+col105 text,
+col106 text,
+col107 text,
+col108 text,
+col109 text,
+col110 text,
+col111 text,
+col112 text,
+col113 text,
+col114 text,
+col115 text,
+col116 text,
+col117 text,
+col118 text,
+col119 text,
+col120 text,
+col121 text,
+col122 text,
+col123 text,
+col124 text,
+col125 text,
+col126 text ,
+col127 text,
+col128 text,
+col129 text,
+col130 text,
+col131 text,
+col132 text,
+col133 text,
+col134 text,
+col135 text,
+col136 text,
+col137 text,
+col138 text,
+col139 text,
+col140 text,
+col141 text,
+col142 text,
+col143 text,
+col144 text,
+col145 text,
+col146 text,
+col147 text ,
+col148 text,
+col149 text,
+col150 text,
+col151 text,
+col152 text,
+col153 text,
+col154 text,
+col155 text,
+col156 text,
+col157 text,
+col158 text,
+col159 text,
+col160 text,
+col161 text,
+col162 text,
+col163 text,
+col164 text,
+col165 text,
+col166 text,
+col167 text,
+col168 text,
+col169 text,
+col170 text,
+col171 text,
+col172 text ,
+col173 text,
+col174 text,
+col175 text,
+col176 text,
+col177 text,
+col178 text,
+col179 text,
+col180 text,
+col181 text,
+col182 text,
+col183 text,
+col184 text,
+col185 text,
+col186 text,
+col187 text,
+col188 text,
+col189 text,
+col190 text,
+col191 text,
+col192 text,
+col193 text,
+col194 text,
+col195 text,
+col196 text,
+col197 text,
+col198 text,
+col199 text,
+col200 text,
+col201 text,
+col202 text,
+col203 text,
+col204 text,
+col205 text,
+col206 text,
+col207 text,
+col208 text,
+col209 text,
+col210 text,
+col211 text,
+col212 text,
+col213 text,
+col214 text,
+col215 text,
+col216 text,
+col217 text,
+col218 text,
+col219 text,
+col220 text,
+col221 text,
+col222 text,
+col223 text,
+col224 text,
+col225 text,
+col226 text,
+col227 text,
+col228 text
+) ENGINE=InnoDB;
+
+set innodb_strict_mode = 1;
+--error ER_TOO_BIG_ROWSIZE
+alter table t1 engine=InnoDB;
+
+drop table t1;
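Compressed to its essence, the scenario above is (sketch only; the real column count needed to exceed the row-size limit depends on page size and row format, so t_wide and its column list are illustrative): DDL defining an over-long row is accepted with a warning while innodb_strict_mode is 0, but the same definition is rejected with ER_TOO_BIG_ROWSIZE once strict mode is on.

set innodb_strict_mode = 0;
# stands in for the 228-column table above; accepted, possibly with a row-size warning
create table t_wide (id int auto_increment primary key,
  c1 text, c2 text /* ... many more text columns ... */) engine=InnoDB;
set innodb_strict_mode = 1;
--error ER_TOO_BIG_ROWSIZE
alter table t_wide engine=InnoDB;
drop table t_wide;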
diff --git a/mysql-test/suite/maria/insert_select-7314.result b/mysql-test/suite/maria/insert_select-7314.result
new file mode 100644
index 00000000000..2f25cc5855c
--- /dev/null
+++ b/mysql-test/suite/maria/insert_select-7314.result
@@ -0,0 +1,17 @@
+drop table if exists t1;
+Warnings:
+Note 1051 Unknown table 'test.t1'
+#
+# MDEV-7314
+# Concurrent "INSERT INTO table SELECT MAX(id)+1 FROM table" are
+# hitting deadlocks on Aria tables using ROW_FORMAT=PAGE
+#
+create table t1 (pk int primary key) engine=Aria;
+insert into t1 values (1);
+insert into t1 select sleep(2)+1 from t1;
+insert into t1 select 2 from t1;
+select * from t1;
+pk
+1
+2
+drop table t1;
diff --git a/mysql-test/suite/maria/insert_select-7314.test b/mysql-test/suite/maria/insert_select-7314.test
new file mode 100644
index 00000000000..f5869edd4ce
--- /dev/null
+++ b/mysql-test/suite/maria/insert_select-7314.test
@@ -0,0 +1,27 @@
+-- source include/have_maria.inc
+--source include/have_binlog_format_statement.inc
+
+drop table if exists t1;
+
+--echo #
+--echo # MDEV-7314
+--echo # Concurrent "INSERT INTO table SELECT MAX(id)+1 FROM table" are
+--echo # hitting deadlocks on Aria tables using ROW_FORMAT=PAGE
+--echo #
+
+create table t1 (pk int primary key) engine=Aria;
+insert into t1 values (1);
+
+send insert into t1 select sleep(2)+1 from t1;
+
+--connect (con1,localhost,root,,)
+
+--error 0,1062
+insert into t1 select 2 from t1;
+
+--connection default
+--error 0,1062
+--reap
+
+select * from t1;
+drop table t1;
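Because the two inserts race, either interleaving is legitimate, which is why both statements accept no error as well as 1062 (ER_DUP_ENTRY). The send/reap pattern the test relies on looks like this in isolation (t_demo and con1 are placeholders):

send INSERT INTO t_demo SELECT SLEEP(2)+1 FROM t_demo;   # issue the slow insert, do not wait
--connect (con1,localhost,root,,)
--error 0,ER_DUP_ENTRY
INSERT INTO t_demo SELECT 2 FROM t_demo;                  # may or may not collide with it
--connection default
--error 0,ER_DUP_ENTRY
--reap                                                    # now fetch the result of the pending insert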
diff --git a/mysql-test/suite/maria/insert_select.result b/mysql-test/suite/maria/insert_select.result
new file mode 100644
index 00000000000..878914ddabf
--- /dev/null
+++ b/mysql-test/suite/maria/insert_select.result
@@ -0,0 +1,6 @@
+create table t1 (pk int primary key) engine=Aria;
+insert into t1 values (1);
+insert into t1 select sleep(2)+1 from t1;
+insert into t1 select 2 from t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table t1;
diff --git a/mysql-test/suite/maria/insert_select.test b/mysql-test/suite/maria/insert_select.test
new file mode 100644
index 00000000000..a86edc7d857
--- /dev/null
+++ b/mysql-test/suite/maria/insert_select.test
@@ -0,0 +1,21 @@
+#
+# MDEV-4010
+# Deadlock on concurrent INSERT .. SELECT into an Aria table with statement
+# binary logging
+#
+--source include/have_binlog_format_statement.inc
+
+create table t1 (pk int primary key) engine=Aria;
+insert into t1 values (1);
+
+send insert into t1 select sleep(2)+1 from t1;
+
+--connect (con1,localhost,root,,)
+
+insert into t1 select 2 from t1;
+
+--connection default
+--error 1062
+--reap
+--disconnect con1
+drop table t1;
diff --git a/mysql-test/suite/multi_source/gtid.result b/mysql-test/suite/multi_source/gtid.result
index ce926ddc995..28ac1e7429e 100644
--- a/mysql-test/suite/multi_source/gtid.result
+++ b/mysql-test/suite/multi_source/gtid.result
@@ -26,59 +26,6 @@ SET SQL_LOG_BIN=0;
CREATE TABLE t3 (a INT PRIMARY KEY, b VARCHAR(10));
SET SQL_LOG_BIN=1;
INSERT INTO t3 VALUES (201, "initial 2");
-SHOW ALL SLAVES STATUS;
-Connection_name
-Slave_SQL_State Slave has read all relay log; waiting for the slave I/O thread to update it
-Slave_IO_State Waiting for master to send event
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MYPORT_3
-Connect_Retry 60
-Master_Log_File server3-bin.000001
-Read_Master_Log_Pos 1501
-Relay_Log_File mysqld-relay-bin.000002
-Relay_Log_Pos 1790
-Relay_Master_Log_File server3-bin.000001
-Slave_IO_Running Yes
-Slave_SQL_Running Yes
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos 1501
-Relay_Log_Space 2088
-Until_Condition None
-Until_Log_File
-Until_Log_Pos 0
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master 0
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-Replicate_Ignore_Server_Ids
-Master_Server_Id 3
-Master_SSL_Crl
-Master_SSL_Crlpath
-Using_Gtid No
-Gtid_IO_Pos
-Retried_transactions 0
-Max_relay_log_size 1073741824
-Executed_log_entries 25
-Slave_received_heartbeats 0
-Slave_heartbeat_period 60.000
-Gtid_Slave_Pos 1-1-4,2-2-3
*** Now move slave2 to replicate from both master1 and master2 instead of just slave1 ***
STOP ALL SLAVES;
Warnings:
@@ -102,9 +49,11 @@ INSERT INTO t1 VALUES (3, "switch 2");
INSERT INTO t3 VALUES (103, "switch 2 a");
INSERT INTO t2 VALUES (3, "switch 2");
INSERT INTO t3 VALUES (203, "switch 2 b");
+include/save_master_gtid.inc
STOP SLAVE 'slave2';
INSERT INTO t2 VALUES (4, "switch 3");
INSERT INTO t3 VALUES (204, "switch 3 b");
+include/sync_with_master_gtid.inc
CHANGE MASTER TO master_port=MYPORT_4, master_host='127.0.0.1', master_user='root', master_use_gtid=current_pos;
START SLAVE;
SELECT * FROM t1 ORDER BY a;
diff --git a/mysql-test/suite/multi_source/gtid.test b/mysql-test/suite/multi_source/gtid.test
index 7a085823693..bebee66068f 100644
--- a/mysql-test/suite/multi_source/gtid.test
+++ b/mysql-test/suite/multi_source/gtid.test
@@ -58,8 +58,6 @@ INSERT INTO t3 VALUES (201, "initial 2");
--source include/wait_condition.inc
--let $wait_condition= SELECT (SELECT COUNT(*) FROM t1)=1 AND (SELECT COUNT(*) FROM t2)=1 AND (SELECT COUNT(*) FROM t3)=2
--source include/wait_condition.inc
---replace_result $SERVER_MYPORT_3 MYPORT_3
-query_vertical SHOW ALL SLAVES STATUS;
--echo *** Now move slave2 to replicate from both master1 and master2 instead of just slave1 ***
STOP ALL SLAVES;
@@ -93,6 +91,8 @@ set default_master_connection = '';
--connection slave1
# Set up so that slave1 will have to start from two different positions
# in the slave2 binlog (one for each domain_id).
+--let $wait_condition= SELECT (SELECT COUNT(*) FROM t1)=2 AND (SELECT COUNT(*) FROM t2)=2 AND (SELECT COUNT(*) FROM t3)=4
+--source include/wait_condition.inc
STOP SLAVE 'slave1';
--connection master1
@@ -106,6 +106,7 @@ INSERT INTO t3 VALUES (103, "switch 2 a");
--connection master2
INSERT INTO t2 VALUES (3, "switch 2");
INSERT INTO t3 VALUES (203, "switch 2 b");
+--source include/save_master_gtid.inc
--connection slave1
--let $wait_condition= SELECT (SELECT COUNT(*) FROM t1)=2 AND (SELECT COUNT(*) FROM t2)=3 AND (SELECT COUNT(*) FROM t3)=5
@@ -116,6 +117,12 @@ STOP SLAVE 'slave2';
INSERT INTO t2 VALUES (4, "switch 3");
INSERT INTO t3 VALUES (204, "switch 3 b");
+--connection slave2
+# Make sure that slave2 has replicated far enough before connecting slave1 to
+# it (otherwise we get an error that slave1 requested to start from a GTID
+# which is not in slave2's binlogs).
+--source include/sync_with_master_gtid.inc
+
--connection slave1
--replace_result $SERVER_MYPORT_4 MYPORT_4
eval CHANGE MASTER TO master_port=$SERVER_MYPORT_4, master_host='127.0.0.1', master_user='root', master_use_gtid=current_pos;
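The save/sync pair added above is the generic recipe for making one server wait until it has applied everything another server has written to its binlog (connection names here are placeholders):

--connection the_master
--source include/save_master_gtid.inc        # remember the current GTID binlog position
--connection the_slave
--source include/sync_with_master_gtid.inc   # block until the slave has reached that position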
diff --git a/mysql-test/suite/perfschema/r/unary_digest.result b/mysql-test/suite/perfschema/r/unary_digest.result
new file mode 100644
index 00000000000..40fcc026fad
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/unary_digest.result
@@ -0,0 +1,47 @@
+TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
+ERROR 42S02: Table 'test.expect_unary' doesn't exist
+ERROR 42S02: Table 'test.expect_unary' doesn't exist
+ERROR 42S02: Table 'test.expect_unary' doesn't exist
+ERROR 42S02: Table 'test.expect_unary' doesn't exist
+ERROR 42S02: Table 'test.expect_unary' doesn't exist
+ERROR 42S02: Table 'test.expect_binary' doesn't exist
+ERROR 42S02: Table 'test.expect_binary' doesn't exist
+ERROR 42S02: Table 'test.expect_binary' doesn't exist
+ERROR 42S02: Table 'test.expect_binary' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_full_reduce' doesn't exist
+ERROR 42S02: Table 'test.expect_unchanged' doesn't exist
+SELECT SCHEMA_NAME, DIGEST_TEXT, COUNT_STAR
+FROM performance_schema.events_statements_summary_by_digest;
+SCHEMA_NAME DIGEST_TEXT COUNT_STAR
+test TRUNCATE TABLE performance_schema . events_statements_summary_by_digest 1
+test SELECT ? FROM expect_unary 5
+test SELECT ? + ? FROM expect_binary 2
+test SELECT ? - ? FROM expect_binary 2
+test INSERT INTO expect_full_reduce VALUES (...) 27
+test SELECT a - b , a + b , - a , - b , + a , + b FROM expect_unchanged 1
diff --git a/mysql-test/suite/perfschema/t/setup_instruments_defaults.test b/mysql-test/suite/perfschema/t/setup_instruments_defaults.test
index 5fd5acceb34..e1f61404d12 100644
--- a/mysql-test/suite/perfschema/t/setup_instruments_defaults.test
+++ b/mysql-test/suite/perfschema/t/setup_instruments_defaults.test
@@ -60,6 +60,8 @@ WHERE name like "%wait/io/table/sql/handler%";
--echo #
--echo # Stop server
--send_shutdown
+--source include/wait_until_disconnected.inc
+
--echo # Restart server with wait/io/table/sql/handler disabled
--exec echo "restart:--loose-performance-schema-instrument=%wait/io/table/sql/%=off" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
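The extra wait closes a race: the restart instruction should not be written, and the reconnect should not start, until the old server is really gone. The restart-with-different-options sequence in mtr generally takes this shape (the option name is a placeholder):

--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--send_shutdown
--source include/wait_until_disconnected.inc
--exec echo "restart:--loose-some-option=off" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--enable_reconnect
--source include/wait_until_connected_again.inc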
diff --git a/mysql-test/suite/perfschema/t/unary_digest.test b/mysql-test/suite/perfschema/t/unary_digest.test
new file mode 100644
index 00000000000..c4583484f36
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/unary_digest.test
@@ -0,0 +1,98 @@
+# ----------------------------------------------------
+# Tests for the performance schema statement Digests.
+# ----------------------------------------------------
+
+# Test case to show how unary plus/minus operators applied to
+# literals are reduced when computing statement digests
+
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+--source ../include/no_protocol.inc
+
+TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
+
+--disable_query_log
+
+--error ER_NO_SUCH_TABLE
+select 1 from expect_unary;
+--error ER_NO_SUCH_TABLE
+select +1 from expect_unary;
+--error ER_NO_SUCH_TABLE
+select -1 from expect_unary;
+--error ER_NO_SUCH_TABLE
+select ++++++++++++++++++++++++++++++++++++++++++++++++1 from expect_unary;
+--error ER_NO_SUCH_TABLE
+select ------------------------------------------------1 from expect_unary;
+
+--error ER_NO_SUCH_TABLE
+select 0+1 from expect_binary;
+--error ER_NO_SUCH_TABLE
+select 0-1 from expect_binary;
+--error ER_NO_SUCH_TABLE
+select 0 ++++++++++++++++++++++++++++++++++++++++++++++++1 from expect_binary;
+--error ER_NO_SUCH_TABLE
+select 0 ------------------------------------------------1 from expect_binary;
+
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (0, 0, 0);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (0, 0, -1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (0, 0, +1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (0, -1, 0);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (0, -1, -1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (0, -1, +1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (0, +1, 0);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (0, +1, -1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (0, +1, +1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (-1, 0, 0);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (-1, 0, -1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (-1, 0, +1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (-1, -1, 0);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (-1, -1, -1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (-1, -1, +1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (-1, +1, 0);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (-1, +1, -1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (-1, +1, +1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (+1, 0, 0);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (+1, 0, -1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (+1, 0, +1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (+1, -1, 0);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (+1, -1, -1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (+1, -1, +1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (+1, +1, 0);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (+1, +1, -1);
+--error ER_NO_SUCH_TABLE
+insert into expect_full_reduce values (+1, +1, +1);
+
+--error ER_NO_SUCH_TABLE
+select a-b, a+b, -a, -b, +a, +b from expect_unchanged;
+
+--enable_query_log
+
+SELECT SCHEMA_NAME, DIGEST_TEXT, COUNT_STAR
+ FROM performance_schema.events_statements_summary_by_digest;
+
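What the digest table should contain afterwards is fixed by the .result above: literals are replaced by ? and chains of unary plus/minus in front of a literal are folded into it, so statements that differ only in such signs share one digest row. In miniature (t_demo is a placeholder):

# all five aggregate under the single digest text "SELECT ? FROM t_demo"
SELECT 1 FROM t_demo;
SELECT +1 FROM t_demo;
SELECT -1 FROM t_demo;
SELECT ++++1 FROM t_demo;
SELECT ----1 FROM t_demo;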
diff --git a/mysql-test/suite/plugins/t/server_audit.test b/mysql-test/suite/plugins/t/server_audit.test
index f63c8022392..869fd944d51 100644
--- a/mysql-test/suite/plugins/t/server_audit.test
+++ b/mysql-test/suite/plugins/t/server_audit.test
@@ -43,6 +43,7 @@ show variables like 'server_audit%';
set global server_audit_mode=1;
set global server_audit_events='';
create database sa_db;
+--sleep 2
connect (con1,localhost,root,,test);
connection con1;
create table t1 (id2 int);
diff --git a/mysql-test/suite/rpl/r/myisam_external_lock.result b/mysql-test/suite/rpl/r/myisam_external_lock.result
new file mode 100644
index 00000000000..ef24f8b4e3c
--- /dev/null
+++ b/mysql-test/suite/rpl/r/myisam_external_lock.result
@@ -0,0 +1,12 @@
+include/master-slave.inc
+[connection master]
+drop table if exists t1;
+Warnings:
+Note 1051 Unknown table 'test.t1'
+CREATE TABLE `t1` (`col1` int(11) NOT NULL,`col2` int(11) NOT NULL,
+PRIMARY KEY (`col1`,`col2`),
+KEY `col2` (`col2`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO `t1` VALUES (2775,974),(2775,975),(2775,976),(2778,977),(2778,978),(2782,979),(2790,986),(2790,1139),(2792,840),(2792,984),(2792,989),(2793,982),(2793,992),(2793,993),(2793,994),(2795,323),(2795,332),(2797,980),(2797,997),(2797,998),(2798,1103),(2798,1104),(2799,841),(2799,985),(2799,988),(2833,983),(2833,990),(2833,991),(2834,981),(2834,995),(2834,996),(2835,316),(2835,317),(3007,854),(3007,856),(3008,855),(3008,857),(3009,823),(3009,824),(3014,1),(3015,1),(3016,2),(3017,2),(3018,3),(3019,3),(3024,842),(3024,843),(3024,844),(3025,845),(3025,846),(3025,847),(3040,31),(3041,32),(3042,52),(3042,55),(3043,53),(3043,54),(3044,278),(3044,279),(3044,280),(3044,281),(3044,282),(3044,283),(3044,284),(3044,285),(3045,1),(3046,1),(3049,220),(3050,221),(3050,222),(3051,2),(3052,2),(3053,223),(3054,224),(3055,225),(3056,226),(3057,227),(3058,228),(3059,229),(3060,327),(3066,236),(3067,237),(3068,238),(3069,239),(3070,240),(3080,241),(3081,242),(3082,247),(3083,248),(3084,249),(3085,250),(3086,251),(3087,252),(3088,253),(3089,254),(3090,255),(3091,256),(3092,257),(3093,258),(3094,259),(3096,263),(3097,264),(3100,273),(3100,302),(3101,266),(3102,267),(3103,268),(3104,269),(3105,270),(3111,275),(3112,238),(3113,272),(3115,286),(3116,318),(3116,319),(3117,290),(3117,292),(3118,238),(3119,291),(3119,293),(3120,304),(3121,305),(3122,306),(3123,307),(3124,308),(3125,309),(3126,310),(3127,311),(3128,312),(3128,336),(3129,313),(3129,350),(3130,314),(3131,315),(3131,351),(3132,325),(3132,328),(3134,502),(3138,334),(3139,338),(3139,339),(3140,340),(3140,341),(3141,344),(3141,345),(3142,346),(3142,347),(3149,351),(3149,354),(3150,351),(3150,356),(3152,358),(3152,359),(3153,361),(3153,370),(3154,363),(3154,369),(3156,350),(3156,371),(3159,376),(3160,377),(3160,379),(3160,384),(3161,378),(3161,380),(3161,383),(3162,388),(3162,389),(3162,390),(3169,392),(3169,393),(3169,394),(3170,395),(3170,396),(3170,397),(3171,398),(3171,399),(3171,400),(3172,401),(3172,402),(3172,403),(3173,404),(3173,405),(3173,406),(3178,351),(3178,421),(3190,411),(3190,412),(3191,413),(3191,414),(3192,415),(3192,416),(3193,417),(3193,418),(3194,419),(3194,420),(3195,353),(3195,424),(3196,425),(3196,426),(3197,427),(3197,428),(3198,429),(3198,430),(3199,431),(3199,432),(3200,433),(3200,434),(3201,435),(3201,436),(3202,437),(3202,438),(3203,439),(3203,440),(3204,441),(3204,442),(3205,443),(3205,444),(3206,445),(3206,446),(3207,447),(3207,448),(3208,449),(3208,450),(3209,451),(3209,452),(3210,453),(3210,454),(3211,455),(3211,456),(3212,457),(3212,458),(3213,459),(3213,460),(3214,461),(3214,462),(3215,463),(3215,464),(3218,466),(3218,467),(3218,468),(3219,469),(3219,470),(3219,471),(3220,474),(3220,475),(3220,476),(3221,477),(3221,478),(3221,479),(3222,480),(3222,481),(3223,482),(3223,483),(3224,484),(3224,485),(3225,486),(3225,487),(3227,503),(3227,505),(3228,506),(3228,507),(3230,508),(3230,509),(3231,510),(3231,511),(3232,512),(3232,513),(3233,514),(3233,515),(3234,516),(3234,517),(3235,518),(3235,519),(3237,521),(3237,522),(3239,524),(3239,525),(3240,526),(3240,527),(3241,528),(3241,529),(3242,530),(3242,531),(3243,532),(3243,533),(3244,534),(3244,535),(3245,536),(3245,537),(3246,538),(3246,539),(3252,540),(3252,541),(3254,543),(3254,544),(3254,545),(3255,547),(3255,548),(3255,571),(3256,550),(3256,551),(3256,572),(3257,553),(3257,554),(3257,573),(3258,556),(3258,557),(3258,574),(3259,559),(3259,560),(3259,575),(3260,561),(3260,562),(3260,563),(3261,565),(3261,576),(3262,566),(3262,567),(3263,568),(3263,569),(3263,570),(3264,577),
(3264,578),(3265,579),(3265,580),(3266,581),(3266,582),(3266,591),(3267,583),(3267,584),(3267,592),(3268,585),(3268,586),(3268,593),(3269,587),(3269,588),(3269,594),(3270,589),(3270,590),(3271,595),(3271,596),(3271,597),(3272,598),(3272,599),(3273,600),(3273,601),(3273,602),(3274,603),(3274,604),(3274,605),(3275,606),(3275,607),(3275,608),(3276,609),(3276,610),(3276,611),(3277,612),(3277,613),(3277,614),(3278,615),(3278,616),(3279,617),(3279,618),(3279,619),(3279,628),(3279,629),(3280,620),(3280,621),(3280,622),(3281,623),(3281,624),(3281,625),(3282,626),(3282,825),(3283,630),(3283,631),(3284,632),(3284,633),(3284,634),(3285,635),(3285,940),(3286,638),(3286,639),(3286,640),(3287,641),(3287,642),(3287,643),(3288,644),(3288,645),(3288,646),(3289,647),(3289,648),(3289,649),(3290,650),(3290,651),(3290,652),(3291,653),(3291,654),(3291,655),(3292,656),(3292,657),(3292,658),(3293,659),(3293,660),(3293,661),(3294,662),(3294,663),(3294,664),(3295,665),(3295,666),(3295,667),(3296,668),(3296,669),(3296,670),(3297,671),(3297,672),(3297,673),(3298,674),(3298,675),(3298,676),(3299,677),(3299,678),(3299,679),(3300,680),(3300,681),(3300,682),(3301,683),(3301,684),(3301,685),(3302,686),(3302,687),(3302,688),(3303,689),(3303,690),(3303,691),(3304,692),(3304,693),(3304,694),(3305,695),(3305,696),(3305,697),(3306,698),(3306,699),(3306,700),(3307,701),(3307,702),(3307,703),(3308,704),(3308,705),(3308,706),(3309,707),(3309,708),(3310,709),(3310,710),(3311,711),(3311,712),(3311,713),(3312,714),(3312,715),(3312,716),(3313,717),(3313,1167),(3314,720),(3314,721),(3314,722),(3315,723),(3315,724),(3315,725),(3316,726),(3316,727),(3316,728),(3317,729),(3317,730),(3317,731),(3318,732),(3318,733),(3318,734),(3319,735),(3319,736),(3319,737),(3320,738),(3320,739),(3320,740),(3321,741),(3321,742),(3322,743),(3322,744),(3323,745),(3323,746),(3323,747),(3324,748),(3324,749),(3324,750),(3325,751),(3325,752),(3325,753),(3326,754),(3326,755),(3327,756),(3327,757),(3328,758),(3328,789),(3329,761),(3329,790),(3330,762),(3330,763),(3331,768),(3331,785),(3331,786),(3332,769),(3332,783),(3332,784),(3335,766),(3336,767),(3343,770),(3343,771),(3344,772),(3344,773),(3345,774),(3345,775),(3347,776),(3347,777),(3347,987),(3348,778),(3348,779),(3349,780),(3372,781),(3372,782),(3373,787),(3373,788),(3376,791),(3376,792),(3377,793),(3377,794),(3378,799),(3378,800),(3379,801),(3379,802),(3380,795),(3380,796),(3381,797),(3381,798),(3383,805),(3384,806),(3384,807),(3385,808),(3385,809),(3386,810),(3386,811),(3387,812),(3387,814),(3388,815),(3388,816),(3391,817),(3391,818),(3391,819),(3392,820),(3392,821),(3392,822),(3393,826),(3393,827),(3394,828),(3394,829),(3395,830),(3395,831),(3396,834),(3396,835),(3397,832),(3397,833),(3398,836),(3398,837),(3399,838),(3399,839),(3410,850),(3410,851),(3411,852),(3411,853),(3412,848),(3412,849),(3419,860),(3419,951),(3420,859),(3420,861),(3422,862),(3422,863),(3423,864),(3423,865),(3424,866),(3424,867),(3424,872),(3424,873),(3425,868),(3425,869),(3425,874),(3425,875),(3426,878),(3426,879),(3427,876),(3427,877),(3428,880),(3432,884),(3432,885),(3432,886),(3434,887),(3434,888),(3434,889),(3441,894),(3441,895),(3442,896),(3442,897),(3444,904),(3445,905),(3449,906),(3449,907),(3450,908),(3450,909),(3453,910),(3458,915),(3458,916),(3459,917),(3459,918),(3463,919),(3463,920),(3485,929),(3486,930),(3487,931),(3488,932),(3489,933),(3493,2),(3494,2),(3501,934),(3502,936),(3503,938),(3504,939),(3505,941),(3506,942),(3507,943),(3508,944),(3509,945),(3510,946),(3511,947),(3512,948),(3514,949),(3514,950),(3515,953),(3516
,954),(3517,955),(3518,956),(3519,957),(3520,958),(3521,959),(3527,960),(3527,965),(3528,961),(3528,962),(3529,963),(3529,964),(3530,966),(3530,967),(3531,968),(3531,969),(3535,970),(3535,971),(3536,972),(3536,973),(3540,999),(3540,1000),(3541,1001),(8888,9999);
+drop table t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_gtid_basic.result b/mysql-test/suite/rpl/r/rpl_gtid_basic.result
index e8e5bf36f84..465cc875af0 100644
--- a/mysql-test/suite/rpl/r/rpl_gtid_basic.result
+++ b/mysql-test/suite/rpl/r/rpl_gtid_basic.result
@@ -286,7 +286,7 @@ INSERT INTO t1 VALUES (4);
master_gtid_wait('2-1-2')
0
KILL CONNECTION KILL_ID;
-ERROR HY000: Lost connection to MySQL server during query
+Got one of the listed errors
SET gtid_domain_id=1;
SET gtid_seq_no=4;
INSERT INTO t1 VALUES (5);
@@ -386,7 +386,7 @@ SET GLOBAL slave_ddl_exec_mode=STRICT;
SET sql_slave_skip_counter=1;
START SLAVE UNTIL master_gtid_pos="3-1-100";
include/sync_with_master_gtid.inc
-include/wait_for_slave_sql_to_stop.inc
+include/wait_for_slave_to_stop.inc
SELECT * FROM t2;
ERROR 42S02: Table 'test.t2' doesn't exist
SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
diff --git a/mysql-test/suite/rpl/r/rpl_gtid_crash.result b/mysql-test/suite/rpl/r/rpl_gtid_crash.result
index 3417ad561f4..75bd9d0cbb1 100644
--- a/mysql-test/suite/rpl/r/rpl_gtid_crash.result
+++ b/mysql-test/suite/rpl/r/rpl_gtid_crash.result
@@ -3,6 +3,7 @@ include/rpl_init.inc [topology=1->2]
call mtr.add_suppression("Checking table:");
call mtr.add_suppression("client is using or hasn't closed the table properly");
call mtr.add_suppression("Table .* is marked as crashed and should be repaired");
+call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed");
flush tables;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
@@ -133,9 +134,17 @@ SELECT @@GLOBAL.server_id;
3
SELECT * from t1 WHERE a > 10 ORDER BY a;
a
+gtid_check
+Binlog pos ok
# Wait 30 seconds for SQL thread to catch up with IO thread
SELECT * from t1 WHERE a > 10 ORDER BY a;
a
+gtid_check
+Binlog pos ok
+gtid_check
+Slave pos ok
+gtid_check
+Current pos ok
# Repeat this with additional transactions on the master
SET GLOBAL debug_dbug="+d,inject_error_writing_xid";
BEGIN;
@@ -175,11 +184,21 @@ SELECT * from t1 WHERE a > 10 ORDER BY a;
a
13
14
+gtid_check
+Binlog pos ok
+gtid_check
+Current pos ok
# Wait 30 seconds for SQL thread to catch up with IO thread
SELECT * from t1 WHERE a > 10 ORDER BY a;
a
13
14
+gtid_check
+Binlog pos ok
+gtid_check
+Slave pos ok
+gtid_check
+Current pos ok
# Repeat this with additional transactions on the master
SET GLOBAL debug_dbug="+d,inject_error_writing_xid";
BEGIN;
@@ -205,5 +224,48 @@ a
14
23
24
+# Repeat this with slave restart
+SET GLOBAL debug_dbug="+d,inject_error_writing_xid";
+BEGIN;
+INSERT INTO t1 VALUES (25);
+COMMIT;
+ERROR HY000: Error writing file 'master-bin' (errno: 28 "No space left on device")
+SET GLOBAL debug_dbug="+d,crash_dispatch_command_before";
+COMMIT;
+Got one of the listed errors
+# Wait 30 seconds for IO thread to connect and SQL thread to catch up
+# with IO thread.
+include/stop_slave.inc
+gtid_check
+Binlog pos ok
+gtid_check
+Current pos ok
+INSERT INTO t1 VALUES (26);
+INSERT INTO t1 VALUES (27);
+SELECT * from t1 WHERE a > 10 ORDER BY a;
+a
+13
+14
+23
+24
+26
+27
+include/save_master_gtid.inc
+gtid_check
+Binlog pos ok
+gtid_check
+Slave pos ok
+gtid_check
+Current pos ok
+include/start_slave.inc
+include/sync_with_master_gtid.inc
+SELECT * from t1 WHERE a > 10 ORDER BY a;
+a
+13
+14
+23
+24
+26
+27
DROP TABLE t1;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_mdev6386.result b/mysql-test/suite/rpl/r/rpl_mdev6386.result
index 352b9d07fef..fa49d9a9c03 100644
--- a/mysql-test/suite/rpl/r/rpl_mdev6386.result
+++ b/mysql-test/suite/rpl/r/rpl_mdev6386.result
@@ -22,6 +22,7 @@ INSERT INTO t2 VALUE (4, 1);
INSERT INTO t2 VALUE (5, 1);
INSERT INTO t1 SELECT * FROM t2;
DROP TEMPORARY TABLE t2;
+include/save_master_gtid.inc
Contents on master:
SELECT * FROM t1 ORDER BY a;
a b
@@ -41,6 +42,7 @@ SET sql_log_bin= 0;
DELETE FROM t1 WHERE a=1;
SET sql_log_bin= 1;
include/start_slave.inc
+include/sync_with_master_gtid.inc
Contents on slave after:
SELECT * FROM t1 ORDER BY a;
a b
diff --git a/mysql-test/suite/rpl/r/rpl_parallel.result b/mysql-test/suite/rpl/r/rpl_parallel.result
index ac21e7a3e01..7ceb5ee6622 100644
--- a/mysql-test/suite/rpl/r/rpl_parallel.result
+++ b/mysql-test/suite/rpl/r/rpl_parallel.result
@@ -972,6 +972,170 @@ SET GLOBAL binlog_format= @old_format;
SET GLOBAL slave_parallel_threads=0;
SET GLOBAL slave_parallel_threads=10;
include/start_slave.inc
+*** MDEV-7237: Parallel replication: incorrect relaylog position after stop/start the slave ***
+INSERT INTO t2 VALUES (40);
+include/stop_slave.inc
+CHANGE MASTER TO master_use_gtid=no;
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug="+d,rpl_parallel_scheduled_gtid_0_x_100";
+SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger";
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+INSERT INTO t2 VALUES (41);
+INSERT INTO t2 VALUES (42);
+SET @old_format= @@binlog_format;
+SET binlog_format= statement;
+DELETE FROM t2 WHERE a=40;
+SET binlog_format= @old_format;
+INSERT INTO t2 VALUES (43);
+INSERT INTO t2 VALUES (44);
+FLUSH LOGS;
+INSERT INTO t2 VALUES (45);
+SET gtid_seq_no=100;
+INSERT INTO t2 VALUES (46);
+BEGIN;
+SELECT * FROM t2 WHERE a=40 FOR UPDATE;
+a
+40
+include/start_slave.inc
+SET debug_sync= 'now WAIT_FOR scheduled_gtid_0_x_100';
+STOP SLAVE;
+SET debug_sync= 'now WAIT_FOR wait_for_done_waiting';
+ROLLBACK;
+include/wait_for_slave_sql_to_stop.inc
+SELECT * FROM t2 WHERE a >= 40 ORDER BY a;
+a
+41
+42
+include/start_slave.inc
+SELECT * FROM t2 WHERE a >= 40 ORDER BY a;
+a
+41
+42
+43
+44
+45
+46
+include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET DEBUG_SYNC= 'RESET';
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+CHANGE MASTER TO master_use_gtid=slave_pos;
+include/start_slave.inc
+*** MDEV-7326 Server deadlock in connection with parallel replication ***
+include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=3;
+SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid";
+include/start_slave.inc
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format= STATEMENT;
+INSERT INTO t1 VALUES (foo(50,
+"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready",
+"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont"));
+SET DEBUG_SYNC= "now WAIT_FOR prep_ready";
+INSERT INTO t2 VALUES (foo(50,
+"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1",
+"rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2"));
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready1";
+INSERT INTO t1 VALUES (foo(51,
+"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1",
+"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2"));
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready1";
+SET DEBUG_SYNC= "now SIGNAL t1_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready2";
+INSERT INTO t1 VALUES (52);
+SET BINLOG_FORMAT= @old_format;
+SELECT * FROM t2 WHERE a>=50 ORDER BY a;
+a
+50
+SELECT * FROM t1 WHERE a>=50 ORDER BY a;
+a
+50
+51
+52
+SET DEBUG_SYNC= "now SIGNAL prep_cont";
+SET DEBUG_SYNC= "now WAIT_FOR t3_ready";
+SET DEBUG_SYNC= "now SIGNAL t2_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready2";
+SET DEBUG_SYNC= "now SIGNAL t1_cont2";
+SELECT * FROM t2 WHERE a>=50 ORDER BY a;
+a
+50
+SELECT * FROM t1 WHERE a>=50 ORDER BY a;
+a
+50
+51
+52
+SET DEBUG_SYNC="reset";
+include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+include/start_slave.inc
+*** MDEV-7326 Server deadlock in connection with parallel replication ***
+include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=3;
+SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid";
+include/start_slave.inc
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format= STATEMENT;
+INSERT INTO t1 VALUES (foo(60,
+"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready",
+"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont"));
+SET DEBUG_SYNC= "now WAIT_FOR prep_ready";
+INSERT INTO t2 VALUES (foo(60,
+"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1",
+"rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2"));
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready1";
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+SET binlog_format=statement;
+INSERT INTO t1 VALUES (foo(61,
+"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1",
+"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2"));
+SET debug_sync='now WAIT_FOR master_queued1';
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+INSERT INTO t6 VALUES (62);
+SET debug_sync='now WAIT_FOR master_queued2';
+SET debug_sync='now SIGNAL master_cont1';
+SET debug_sync='RESET';
+SET BINLOG_FORMAT= @old_format;
+SELECT * FROM t2 WHERE a>=60 ORDER BY a;
+a
+60
+SELECT * FROM t1 WHERE a>=60 ORDER BY a;
+a
+60
+61
+SELECT * FROM t6 WHERE a>=60 ORDER BY a;
+a
+62
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready1";
+SET DEBUG_SYNC= "now SIGNAL t1_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready2";
+SET DEBUG_SYNC= "now SIGNAL prep_cont";
+SET DEBUG_SYNC= "now WAIT_FOR t3_ready";
+SET DEBUG_SYNC= "now SIGNAL t2_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready2";
+SET DEBUG_SYNC= "now SIGNAL t1_cont2";
+SELECT * FROM t2 WHERE a>=60 ORDER BY a;
+a
+60
+SELECT * FROM t1 WHERE a>=60 ORDER BY a;
+a
+60
+61
+SELECT * FROM t6 WHERE a>=60 ORDER BY a;
+a
+62
+SET DEBUG_SYNC="reset";
+include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+include/start_slave.inc
include/stop_slave.inc
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
include/start_slave.inc
diff --git a/mysql-test/suite/rpl/r/rpl_parallel2.result b/mysql-test/suite/rpl/r/rpl_parallel2.result
index 49be484f419..8bf8b9caf3b 100644
--- a/mysql-test/suite/rpl/r/rpl_parallel2.result
+++ b/mysql-test/suite/rpl/r/rpl_parallel2.result
@@ -9,6 +9,7 @@ CALL mtr.add_suppression("Unsafe statement written to the binary log using state
INSERT INTO t1 VALUES (1,sleep(2));
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. Statement is unsafe because it uses a system function that may return a different value on the slave.
+include/wait_for_slave_param.inc [Seconds_Behind_Master]
Seconds_Behind_Master should be zero here because the slave is fully caught up and idle.
Seconds_Behind_Master = '0'
include/stop_slave.inc
diff --git a/mysql-test/suite/rpl/t/myisam_external_lock-slave.opt b/mysql-test/suite/rpl/t/myisam_external_lock-slave.opt
new file mode 100644
index 00000000000..db53e17d4b3
--- /dev/null
+++ b/mysql-test/suite/rpl/t/myisam_external_lock-slave.opt
@@ -0,0 +1,2 @@
+--log-slave-updates=0
+--skip_external_locking=0
diff --git a/mysql-test/suite/rpl/t/myisam_external_lock.test b/mysql-test/suite/rpl/t/myisam_external_lock.test
new file mode 100644
index 00000000000..14824fd8321
--- /dev/null
+++ b/mysql-test/suite/rpl/t/myisam_external_lock.test
@@ -0,0 +1,24 @@
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+#
+# MDEV-6871 Multi-value insert on MyISAM table that makes slaves crash
+# This only happens if external_lock is enabled
+#
+
+drop table if exists t1;
+CREATE TABLE `t1` (`col1` int(11) NOT NULL,`col2` int(11) NOT NULL,
+ PRIMARY KEY (`col1`,`col2`),
+ KEY `col2` (`col2`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO `t1` VALUES (2775,974),(2775,975),(2775,976),(2778,977),(2778,978),(2782,979),(2790,986),(2790,1139),(2792,840),(2792,984),(2792,989),(2793,982),(2793,992),(2793,993),(2793,994),(2795,323),(2795,332),(2797,980),(2797,997),(2797,998),(2798,1103),(2798,1104),(2799,841),(2799,985),(2799,988),(2833,983),(2833,990),(2833,991),(2834,981),(2834,995),(2834,996),(2835,316),(2835,317),(3007,854),(3007,856),(3008,855),(3008,857),(3009,823),(3009,824),(3014,1),(3015,1),(3016,2),(3017,2),(3018,3),(3019,3),(3024,842),(3024,843),(3024,844),(3025,845),(3025,846),(3025,847),(3040,31),(3041,32),(3042,52),(3042,55),(3043,53),(3043,54),(3044,278),(3044,279),(3044,280),(3044,281),(3044,282),(3044,283),(3044,284),(3044,285),(3045,1),(3046,1),(3049,220),(3050,221),(3050,222),(3051,2),(3052,2),(3053,223),(3054,224),(3055,225),(3056,226),(3057,227),(3058,228),(3059,229),(3060,327),(3066,236),(3067,237),(3068,238),(3069,239),(3070,240),(3080,241),(3081,242),(3082,247),(3083,248),(3084,249),(3085,250),(3086,251),(3087,252),(3088,253),(3089,254),(3090,255),(3091,256),(3092,257),(3093,258),(3094,259),(3096,263),(3097,264),(3100,273),(3100,302),(3101,266),(3102,267),(3103,268),(3104,269),(3105,270),(3111,275),(3112,238),(3113,272),(3115,286),(3116,318),(3116,319),(3117,290),(3117,292),(3118,238),(3119,291),(3119,293),(3120,304),(3121,305),(3122,306),(3123,307),(3124,308),(3125,309),(3126,310),(3127,311),(3128,312),(3128,336),(3129,313),(3129,350),(3130,314),(3131,315),(3131,351),(3132,325),(3132,328),(3134,502),(3138,334),(3139,338),(3139,339),(3140,340),(3140,341),(3141,344),(3141,345),(3142,346),(3142,347),(3149,351),(3149,354),(3150,351),(3150,356),(3152,358),(3152,359),(3153,361),(3153,370),(3154,363),(3154,369),(3156,350),(3156,371),(3159,376),(3160,377),(3160,379),(3160,384),(3161,378),(3161,380),(3161,383),(3162,388),(3162,389),(3162,390),(3169,392),(3169,393),(3169,394),(3170,395),(3170,396),(3170,397),(3171,398),(3171,399),(3171,400),(3172,401),(3172,402),(3172,403),(3173,404),(3173,405),(3173,406),(3178,351),(3178,421),(3190,411),(3190,412),(3191,413),(3191,414),(3192,415),(3192,416),(3193,417),(3193,418),(3194,419),(3194,420),(3195,353),(3195,424),(3196,425),(3196,426),(3197,427),(3197,428),(3198,429),(3198,430),(3199,431),(3199,432),(3200,433),(3200,434),(3201,435),(3201,436),(3202,437),(3202,438),(3203,439),(3203,440),(3204,441),(3204,442),(3205,443),(3205,444),(3206,445),(3206,446),(3207,447),(3207,448),(3208,449),(3208,450),(3209,451),(3209,452),(3210,453),(3210,454),(3211,455),(3211,456),(3212,457),(3212,458),(3213,459),(3213,460),(3214,461),(3214,462),(3215,463),(3215,464),(3218,466),(3218,467),(3218,468),(3219,469),(3219,470),(3219,471),(3220,474),(3220,475),(3220,476),(3221,477),(3221,478),(3221,479),(3222,480),(3222,481),(3223,482),(3223,483),(3224,484),(3224,485),(3225,486),(3225,487),(3227,503),(3227,505),(3228,506),(3228,507),(3230,508),(3230,509),(3231,510),(3231,511),(3232,512),(3232,513),(3233,514),(3233,515),(3234,516),(3234,517),(3235,518),(3235,519),(3237,521),(3237,522),(3239,524),(3239,525),(3240,526),(3240,527),(3241,528),(3241,529),(3242,530),(3242,531),(3243,532),(3243,533),(3244,534),(3244,535),(3245,536),(3245,537),(3246,538),(3246,539),(3252,540),(3252,541),(3254,543),(3254,544),(3254,545),(3255,547),(3255,548),(3255,571),(3256,550),(3256,551),(3256,572),(3257,553),(3257,554),(3257,573),(3258,556),(3258,557),(3258,574),(3259,559),(3259,560),(3259,575),(3260,561),(3260,562),(3260,563),(3261,565),(3261,576),(3262,566),(3262,567),(3263,568),(3263,569),(3263,570),(3264,577),
(3264,578),(3265,579),(3265,580),(3266,581),(3266,582),(3266,591),(3267,583),(3267,584),(3267,592),(3268,585),(3268,586),(3268,593),(3269,587),(3269,588),(3269,594),(3270,589),(3270,590),(3271,595),(3271,596),(3271,597),(3272,598),(3272,599),(3273,600),(3273,601),(3273,602),(3274,603),(3274,604),(3274,605),(3275,606),(3275,607),(3275,608),(3276,609),(3276,610),(3276,611),(3277,612),(3277,613),(3277,614),(3278,615),(3278,616),(3279,617),(3279,618),(3279,619),(3279,628),(3279,629),(3280,620),(3280,621),(3280,622),(3281,623),(3281,624),(3281,625),(3282,626),(3282,825),(3283,630),(3283,631),(3284,632),(3284,633),(3284,634),(3285,635),(3285,940),(3286,638),(3286,639),(3286,640),(3287,641),(3287,642),(3287,643),(3288,644),(3288,645),(3288,646),(3289,647),(3289,648),(3289,649),(3290,650),(3290,651),(3290,652),(3291,653),(3291,654),(3291,655),(3292,656),(3292,657),(3292,658),(3293,659),(3293,660),(3293,661),(3294,662),(3294,663),(3294,664),(3295,665),(3295,666),(3295,667),(3296,668),(3296,669),(3296,670),(3297,671),(3297,672),(3297,673),(3298,674),(3298,675),(3298,676),(3299,677),(3299,678),(3299,679),(3300,680),(3300,681),(3300,682),(3301,683),(3301,684),(3301,685),(3302,686),(3302,687),(3302,688),(3303,689),(3303,690),(3303,691),(3304,692),(3304,693),(3304,694),(3305,695),(3305,696),(3305,697),(3306,698),(3306,699),(3306,700),(3307,701),(3307,702),(3307,703),(3308,704),(3308,705),(3308,706),(3309,707),(3309,708),(3310,709),(3310,710),(3311,711),(3311,712),(3311,713),(3312,714),(3312,715),(3312,716),(3313,717),(3313,1167),(3314,720),(3314,721),(3314,722),(3315,723),(3315,724),(3315,725),(3316,726),(3316,727),(3316,728),(3317,729),(3317,730),(3317,731),(3318,732),(3318,733),(3318,734),(3319,735),(3319,736),(3319,737),(3320,738),(3320,739),(3320,740),(3321,741),(3321,742),(3322,743),(3322,744),(3323,745),(3323,746),(3323,747),(3324,748),(3324,749),(3324,750),(3325,751),(3325,752),(3325,753),(3326,754),(3326,755),(3327,756),(3327,757),(3328,758),(3328,789),(3329,761),(3329,790),(3330,762),(3330,763),(3331,768),(3331,785),(3331,786),(3332,769),(3332,783),(3332,784),(3335,766),(3336,767),(3343,770),(3343,771),(3344,772),(3344,773),(3345,774),(3345,775),(3347,776),(3347,777),(3347,987),(3348,778),(3348,779),(3349,780),(3372,781),(3372,782),(3373,787),(3373,788),(3376,791),(3376,792),(3377,793),(3377,794),(3378,799),(3378,800),(3379,801),(3379,802),(3380,795),(3380,796),(3381,797),(3381,798),(3383,805),(3384,806),(3384,807),(3385,808),(3385,809),(3386,810),(3386,811),(3387,812),(3387,814),(3388,815),(3388,816),(3391,817),(3391,818),(3391,819),(3392,820),(3392,821),(3392,822),(3393,826),(3393,827),(3394,828),(3394,829),(3395,830),(3395,831),(3396,834),(3396,835),(3397,832),(3397,833),(3398,836),(3398,837),(3399,838),(3399,839),(3410,850),(3410,851),(3411,852),(3411,853),(3412,848),(3412,849),(3419,860),(3419,951),(3420,859),(3420,861),(3422,862),(3422,863),(3423,864),(3423,865),(3424,866),(3424,867),(3424,872),(3424,873),(3425,868),(3425,869),(3425,874),(3425,875),(3426,878),(3426,879),(3427,876),(3427,877),(3428,880),(3432,884),(3432,885),(3432,886),(3434,887),(3434,888),(3434,889),(3441,894),(3441,895),(3442,896),(3442,897),(3444,904),(3445,905),(3449,906),(3449,907),(3450,908),(3450,909),(3453,910),(3458,915),(3458,916),(3459,917),(3459,918),(3463,919),(3463,920),(3485,929),(3486,930),(3487,931),(3488,932),(3489,933),(3493,2),(3494,2),(3501,934),(3502,936),(3503,938),(3504,939),(3505,941),(3506,942),(3507,943),(3508,944),(3509,945),(3510,946),(3511,947),(3512,948),(3514,949),(3514,950),(3515,953),(3516
,954),(3517,955),(3518,956),(3519,957),(3520,958),(3521,959),(3527,960),(3527,965),(3528,961),(3528,962),(3529,963),(3529,964),(3530,966),(3530,967),(3531,968),(3531,969),(3535,970),(3535,971),(3536,972),(3536,973),(3540,999),(3540,1000),(3541,1001),(8888,9999);
+
+drop table t1;
+
+save_master_pos;
+connection slave;
+sync_with_master;
+
+connection master;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_gtid_basic.test b/mysql-test/suite/rpl/t/rpl_gtid_basic.test
index 5ecff519aef..19f90fce197 100644
--- a/mysql-test/suite/rpl/t/rpl_gtid_basic.test
+++ b/mysql-test/suite/rpl/t/rpl_gtid_basic.test
@@ -334,7 +334,7 @@ reap;
eval KILL CONNECTION $kill2_id;
--connection s6
---error 2013
+--error 2013,ER_CONNECTION_KILLED
reap;
--connection server_1
@@ -456,7 +456,7 @@ SET sql_slave_skip_counter=1;
START SLAVE UNTIL master_gtid_pos="3-1-100";
--let $master_pos=3-1-100
--source include/sync_with_master_gtid.inc
---source include/wait_for_slave_sql_to_stop.inc
+--source include/wait_for_slave_to_stop.inc
--error ER_NO_SUCH_TABLE
SELECT * FROM t2;
SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status;
diff --git a/mysql-test/suite/rpl/t/rpl_gtid_crash-slave.opt b/mysql-test/suite/rpl/t/rpl_gtid_crash-slave.opt
new file mode 100644
index 00000000000..69c1a64e388
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_gtid_crash-slave.opt
@@ -0,0 +1 @@
+--master-retry-count=100
diff --git a/mysql-test/suite/rpl/t/rpl_gtid_crash.test b/mysql-test/suite/rpl/t/rpl_gtid_crash.test
index 0caad2a12fe..90b4e454a74 100644
--- a/mysql-test/suite/rpl/t/rpl_gtid_crash.test
+++ b/mysql-test/suite/rpl/t/rpl_gtid_crash.test
@@ -12,6 +12,11 @@
call mtr.add_suppression("Checking table:");
call mtr.add_suppression("client is using or hasn't closed the table properly");
call mtr.add_suppression("Table .* is marked as crashed and should be repaired");
+# We have seen this warning a couple of times in Buildbot. Since we crash the
+# server deliberately, it seems possible that we could in rare cases crash in
+# the middle of a page write. The page is recovered from the doublewrite
+# buffer ("[Note] InnoDB: Recovered the page from the doublewrite buffer.").
+call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed");
flush tables;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
@@ -210,6 +215,7 @@ EOF
wait
EOF
SET GLOBAL debug_dbug="+d,inject_crash_before_flush_rli";
+--error 0,2006,2013
START SLAVE;
--connection server_1
@@ -234,6 +240,7 @@ EOF
wait
EOF
SET GLOBAL debug_dbug="+d,inject_crash_after_flush_rli";
+--error 0,2006,2013
START SLAVE;
--connection server_1
@@ -269,6 +276,7 @@ SET GLOBAL debug_dbug="+d,crash_before_writing_xid";
--connection server_1
INSERT INTO t1 VALUES (9), (10);
+--let $saved_gtid=`SELECT @@last_gtid`
--save_master_pos
--connection server_2
@@ -333,6 +341,9 @@ EOF
SELECT @@GLOBAL.server_id;
SELECT * from t1 WHERE a > 10 ORDER BY a;
+--disable_query_log
+eval SELECT IF(INSTR(@@gtid_binlog_pos, '$saved_gtid'), "Binlog pos ok", CONCAT("Unexpected binlog pos: ", @@gtid_binlog_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+--enable_query_log
--echo # Wait 30 seconds for SQL thread to catch up with IO thread
--connection server_2
@@ -357,6 +368,11 @@ if ($read_log_pos != $exec_log_pos)
}
SELECT * from t1 WHERE a > 10 ORDER BY a;
+--disable_query_log
+eval SELECT IF(INSTR(@@gtid_binlog_pos, '$saved_gtid'), "Binlog pos ok", CONCAT("Unexpected binlog pos: ", @@gtid_binlog_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+eval SELECT IF(INSTR(@@gtid_slave_pos, '$saved_gtid'), "Slave pos ok", CONCAT("Unexpected slave pos: ", @@gtid_slave_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+eval SELECT IF(INSTR(@@gtid_current_pos, '$saved_gtid'), "Current pos ok", CONCAT("Unexpected current pos: ", @@gtid_current_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+--enable_query_log
--echo # Repeat this with additional transactions on the master
@@ -387,6 +403,7 @@ EOF
SELECT @@GLOBAL.server_id;
INSERT INTO t1 VALUES (13);
INSERT INTO t1 VALUES (14);
+--let $saved_gtid=`SELECT @@last_gtid`
SELECT * from t1 WHERE a > 10 ORDER BY a;
--source include/save_master_gtid.inc
@@ -420,6 +437,10 @@ EOF
SELECT @@GLOBAL.server_id;
SELECT * from t1 WHERE a > 10 ORDER BY a;
+--disable_query_log
+eval SELECT IF(INSTR(@@gtid_binlog_pos, '$saved_gtid'), "Binlog pos ok", CONCAT("Unexpected binlog pos: ", @@gtid_binlog_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+eval SELECT IF(INSTR(@@gtid_current_pos, '$saved_gtid'), "Current pos ok", CONCAT("Unexpected current pos: ", @@gtid_current_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+--enable_query_log
--echo # Wait 30 seconds for SQL thread to catch up with IO thread
--connection server_2
@@ -444,6 +465,11 @@ if ($read_log_pos != $exec_log_pos)
}
SELECT * from t1 WHERE a > 10 ORDER BY a;
+--disable_query_log
+eval SELECT IF(INSTR(@@gtid_binlog_pos, '$saved_gtid'), "Binlog pos ok", CONCAT("Unexpected binlog pos: ", @@gtid_binlog_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+eval SELECT IF(INSTR(@@gtid_slave_pos, '$saved_gtid'), "Slave pos ok", CONCAT("Unexpected slave pos: ", @@gtid_slave_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+eval SELECT IF(INSTR(@@gtid_current_pos, '$saved_gtid'), "Current pos ok", CONCAT("Unexpected current pos: ", @@gtid_current_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+--enable_query_log
--echo # Repeat this with additional transactions on the master
@@ -472,6 +498,7 @@ EOF
INSERT INTO t1 VALUES (23);
INSERT INTO t1 VALUES (24);
+--let $saved_gtid=`SELECT @@last_gtid`
SELECT * from t1 WHERE a > 10 ORDER BY a;
--source include/save_master_gtid.inc
@@ -479,6 +506,86 @@ SELECT * from t1 WHERE a > 10 ORDER BY a;
--source include/sync_with_master_gtid.inc
SELECT * from t1 WHERE a > 10 ORDER BY a;
+--echo # Repeat this with slave restart
+
+--connection server_1
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+wait
+EOF
+
+SET GLOBAL debug_dbug="+d,inject_error_writing_xid";
+BEGIN;
+INSERT INTO t1 VALUES (25);
+--error ER_ERROR_ON_WRITE
+COMMIT;
+SET GLOBAL debug_dbug="+d,crash_dispatch_command_before";
+--error 2006,2013
+COMMIT;
+
+--source include/wait_until_disconnected.inc
+
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+restart
+EOF
+
+--connection server_1
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+--connection server_2
+--echo # Wait 30 seconds for IO thread to connect and SQL thread to catch up
+--echo # with IO thread.
+--let $wait_timeout= 300
+while ($wait_timeout != 0)
+{
+ --let $connected=`SELECT COUNT(*) > 0 FROM information_schema.processlist WHERE State = 'Waiting for master to send event'`
+ if ($connected)
+ {
+ --let $read_log_pos= query_get_value('SHOW SLAVE STATUS', Read_Master_Log_Pos, 1)
+ --let $exec_log_pos= query_get_value('SHOW SLAVE STATUS', Exec_Master_Log_Pos, 1)
+ if ($read_log_pos == $exec_log_pos)
+ {
+ --let $wait_timeout= 0
+ }
+ if ($read_log_pos != $exec_log_pos)
+ {
+ --sleep 0.1
+ --dec $wait_timeout
+ }
+ }
+ if (!$connected)
+ {
+ --sleep 0.1
+ --dec $wait_timeout
+ }
+}
+if (`SELECT NOT $connected OR $read_log_pos != $exec_log_pos`)
+{
+  --die Timeout waiting for IO thread to connect and SQL thread to catch up with IO thread
+}
+
+--source include/stop_slave.inc
+
+--connection server_1
+--disable_query_log
+eval SELECT IF(INSTR(@@gtid_binlog_pos, '$saved_gtid'), "Binlog pos ok", CONCAT("Unexpected binlog pos: ", @@gtid_binlog_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+eval SELECT IF(INSTR(@@gtid_current_pos, '$saved_gtid'), "Current pos ok", CONCAT("Unexpected current pos: ", @@gtid_current_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+--enable_query_log
+INSERT INTO t1 VALUES (26);
+INSERT INTO t1 VALUES (27);
+SELECT * from t1 WHERE a > 10 ORDER BY a;
+--source include/save_master_gtid.inc
+
+--connection server_2
+--disable_query_log
+eval SELECT IF(INSTR(@@gtid_binlog_pos, '$saved_gtid'), "Binlog pos ok", CONCAT("Unexpected binlog pos: ", @@gtid_binlog_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+eval SELECT IF(INSTR(@@gtid_slave_pos, '$saved_gtid'), "Slave pos ok", CONCAT("Unexpected slave pos: ", @@gtid_slave_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+eval SELECT IF(INSTR(@@gtid_current_pos, '$saved_gtid'), "Current pos ok", CONCAT("Unexpected current pos: ", @@gtid_current_pos, "; does not contain the GTID $saved_gtid.")) AS gtid_check;
+--enable_query_log
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+SELECT * from t1 WHERE a > 10 ORDER BY a;
+
--connection server_1
DROP TABLE t1;
diff --git a/mysql-test/suite/rpl/t/rpl_mdev6386.test b/mysql-test/suite/rpl/t/rpl_mdev6386.test
index 5513d15a77d..3e4e79ea5a3 100644
--- a/mysql-test/suite/rpl/t/rpl_mdev6386.test
+++ b/mysql-test/suite/rpl/t/rpl_mdev6386.test
@@ -31,7 +31,7 @@ INSERT INTO t2 VALUE (4, 1);
INSERT INTO t2 VALUE (5, 1);
INSERT INTO t1 SELECT * FROM t2;
DROP TEMPORARY TABLE t2;
---save_master_pos
+--source include/save_master_gtid.inc
--echo Contents on master:
SELECT * FROM t1 ORDER BY a;
@@ -56,7 +56,7 @@ DELETE FROM t1 WHERE a=1;
SET sql_log_bin= 1;
--source include/start_slave.inc
---sync_with_master
+--source include/sync_with_master_gtid.inc
--echo Contents on slave after:
SELECT * FROM t1 ORDER BY a;
diff --git a/mysql-test/suite/rpl/t/rpl_parallel.test b/mysql-test/suite/rpl/t/rpl_parallel.test
index d3ec08f5508..d4b99d4b0f7 100644
--- a/mysql-test/suite/rpl/t/rpl_parallel.test
+++ b/mysql-test/suite/rpl/t/rpl_parallel.test
@@ -1535,6 +1535,315 @@ SET GLOBAL slave_parallel_threads=10;
--source include/start_slave.inc
+--echo *** MDEV-7237: Parallel replication: incorrect relaylog position after stop/start the slave ***
+--connection server_1
+INSERT INTO t2 VALUES (40);
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--source include/stop_slave.inc
+CHANGE MASTER TO master_use_gtid=no;
+SET @old_dbug= @@GLOBAL.debug_dbug;
+# This DBUG injection causes a DEBUG_SYNC signal "scheduled_gtid_0_x_100" when
+# GTID 0-1-100 has been scheduled for and fetched by a worker thread.
+SET GLOBAL debug_dbug="+d,rpl_parallel_scheduled_gtid_0_x_100";
+# This DBUG injection causes a DEBUG_SYNC signal "wait_for_done_waiting" when
+# STOP SLAVE has signalled all worker threads to stop.
+SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger";
+# Reset worker threads to make DBUG setting catch on.
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+
+
+--connection server_1
+# Setup some transaction for the slave to replicate.
+INSERT INTO t2 VALUES (41);
+INSERT INTO t2 VALUES (42);
+# Need to log the DELETE in statement format, so we can see it in processlist.
+SET @old_format= @@binlog_format;
+SET binlog_format= statement;
+DELETE FROM t2 WHERE a=40;
+SET binlog_format= @old_format;
+INSERT INTO t2 VALUES (43);
+INSERT INTO t2 VALUES (44);
+# Force the slave to switch to a new relay log file.
+FLUSH LOGS;
+INSERT INTO t2 VALUES (45);
+# Inject a GTID 0-1-100, which will trigger a DEBUG_SYNC signal when this
+# transaction has been fetched by a worker thread.
+SET gtid_seq_no=100;
+INSERT INTO t2 VALUES (46);
+--save_master_pos
+
+--connection con_temp2
+# Temporarily block the DELETE on a=40 from completing.
+BEGIN;
+SELECT * FROM t2 WHERE a=40 FOR UPDATE;
+
+
+--connection server_2
+--source include/start_slave.inc
+
+# Wait for a worker thread to start on the DELETE that will be blocked
+# temporarily by the SELECT FOR UPDATE.
+--let $wait_condition= SELECT count(*) > 0 FROM information_schema.processlist WHERE state='updating' and info LIKE '%DELETE FROM t2 WHERE a=40%'
+--source include/wait_condition.inc
+
+# The DBUG injection set above will make the worker thread signal the following
+# debug_sync when the GTID 0-1-100 has been reached by a worker thread.
+# Thus, at this point, the SQL driver thread has reached the next
+# relay log file name, while a worker thread is still processing a
+# transaction in the previous relay log file, blocked on the SELECT FOR
+# UPDATE.
+SET debug_sync= 'now WAIT_FOR scheduled_gtid_0_x_100';
+# At this point, the SQL driver thread is in the new relay log file, while
+# the DELETE from the old relay log file is not yet complete. We will stop
+# the slave at this point. The bug was that the DELETE statement would
+# update the slave position to the _new_ relay log file name instead of
+# its own old file name. Thus, by stopping and restarting the slave at this
+# point, we would get an error at restart due to an incorrect position. (If
+# we let the slave catch up before stopping, the incorrect position
+# would be corrected by a later transaction).
+
+send STOP SLAVE;
+
+--connection con_temp2
+# Wait for STOP SLAVE to have proceeded sufficiently that it has signalled
+# all worker threads to stop; this ensures that we will stop after the DELETE
+# transaction (and not after a later transaction that might have been able
+# to set a fixed position).
+SET debug_sync= 'now WAIT_FOR wait_for_done_waiting';
+# Now release the row lock that was blocking the replication of DELETE.
+ROLLBACK;
+
+--connection server_2
+reap;
+--source include/wait_for_slave_sql_to_stop.inc
+SELECT * FROM t2 WHERE a >= 40 ORDER BY a;
+# Now restart the slave. With the bug present, this would start at an
+# incorrect relay log position, causing relay log read error (or if unlucky,
+# silently skip a number of events).
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t2 WHERE a >= 40 ORDER BY a;
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET DEBUG_SYNC= 'RESET';
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+CHANGE MASTER TO master_use_gtid=slave_pos;
+--source include/start_slave.inc
+
+
+--echo *** MDEV-7326 Server deadlock in connection with parallel replication ***
+# We use three transactions, each in a separate group commit.
+# T1 does mark_start_commit(), then gets a deadlock error.
+# T2 wakes up and starts running
+# T1 does unmark_start_commit()
+# T3 goes to wait for T2 to start its commit
+# T2 does mark_start_commit()
+# The bug was that at this point, T3 got deadlocked. Because T1 has unmarked(),
+# T3 did not yet see the count_committing_event_groups reach its target value
+# yet. But when T1 later re-did mark_start_commit(), it failed to send a wakeup
+# to T3.
+
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=3;
+SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid";
+--source include/start_slave.inc
+
+--connection server_1
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format= STATEMENT;
+# This debug_sync will linger on and be used to control T3 later.
+INSERT INTO t1 VALUES (foo(50,
+ "rpl_parallel_start_waiting_for_prior SIGNAL t3_ready",
+ "rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont"));
+--save_master_pos
+--connection server_2
+# Wait for the debug_sync point for T3 to be set. But let the preparation
+# transaction remain hanging, so that T1 and T2 will be scheduled for the
+# remaining two worker threads.
+SET DEBUG_SYNC= "now WAIT_FOR prep_ready";
+
+--connection server_1
+INSERT INTO t2 VALUES (foo(50,
+ "rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1",
+ "rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2"));
+--save_master_pos
+
+--connection server_2
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready1";
+# T1 has now done mark_start_commit(). It will later do a rollback and retry.
+
+--connection server_1
+# Use a MyISAM table for T2 and T3, so they do not trigger the
+# rpl_parallel_simulate_temp_err_xid DBUG insertion on XID event.
+INSERT INTO t1 VALUES (foo(51,
+ "rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1",
+ "rpl_parallel_after_mark_start_commit SIGNAL t2_ready2"));
+
+--connection server_2
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready1";
+# T2 has now started running, but has not yet done mark_start_commit()
+SET DEBUG_SYNC= "now SIGNAL t1_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready2";
+# T1 has now done unmark_start_commit() in preparation for its retry.
+
+--connection server_1
+INSERT INTO t1 VALUES (52);
+SET BINLOG_FORMAT= @old_format;
+SELECT * FROM t2 WHERE a>=50 ORDER BY a;
+SELECT * FROM t1 WHERE a>=50 ORDER BY a;
+
+--connection server_2
+# Let the preparation transaction complete, so that the same worker thread
+# can continue with the transaction T3.
+SET DEBUG_SYNC= "now SIGNAL prep_cont";
+SET DEBUG_SYNC= "now WAIT_FOR t3_ready";
+# T3 has now gone to wait for T2 to start committing
+SET DEBUG_SYNC= "now SIGNAL t2_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready2";
+# T2 has now done mark_start_commit().
+# Let things run, and check that T3 does not get deadlocked.
+SET DEBUG_SYNC= "now SIGNAL t1_cont2";
+--sync_with_master
+
+--connection server_1
+--save_master_pos
+--connection server_2
+--sync_with_master
+SELECT * FROM t2 WHERE a>=50 ORDER BY a;
+SELECT * FROM t1 WHERE a>=50 ORDER BY a;
+SET DEBUG_SYNC="reset";
+
+# Re-spawn the worker threads to remove any DBUG injections or DEBUG_SYNC.
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+
+--echo *** MDEV-7326 Server deadlock in connection with parallel replication ***
+# Similar to the previous test, but with T2 and T3 in the same GCO.
+# We use three transactions, T1 in one group commit and T2/T3 in another.
+# T1 does mark_start_commit(), then gets a deadlock error.
+# T2 wakes up and starts running
+# T1 does unmark_start_commit()
+# T3 goes to wait for T1 to start its commit
+# T2 does mark_start_commit()
+# The bug was that at this point, T3 got deadlocked. T2 increments the
+# count_committing_event_groups but does not signal T3, as they are in
+# the same GCO. Then later when T1 increments, it would also not signal
+# T3, because now the count_committing_event_groups is not equal to the
+# wait_count of T3 (it is one larger).
+
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=3;
+SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid";
+--source include/start_slave.inc
+
+--connection server_1
+SET @old_format= @@SESSION.binlog_format;
+SET binlog_format= STATEMENT;
+# This debug_sync will linger on and be used to control T3 later.
+INSERT INTO t1 VALUES (foo(60,
+ "rpl_parallel_start_waiting_for_prior SIGNAL t3_ready",
+ "rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont"));
+--save_master_pos
+--connection server_2
+# Wait for the debug_sync point for T3 to be set. But let the preparation
+# transaction remain hanging, so that T1 and T2 will be scheduled for the
+# remaining two worker threads.
+SET DEBUG_SYNC= "now WAIT_FOR prep_ready";
+
+--connection server_1
+INSERT INTO t2 VALUES (foo(60,
+ "rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1",
+ "rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2"));
+--save_master_pos
+
+--connection server_2
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready1";
+# T1 has now done mark_start_commit(). It will later do a rollback and retry.
+
+# Do T2 and T3 in a single group commit.
+# Use a MyISAM table for T2 and T3, so they do not trigger the
+# rpl_parallel_simulate_temp_err_xid DBUG insertion on XID event.
+--connection con_temp3
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1';
+SET binlog_format=statement;
+send INSERT INTO t1 VALUES (foo(61,
+ "rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1",
+ "rpl_parallel_after_mark_start_commit SIGNAL t2_ready2"));
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued1';
+
+--connection con_temp4
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2';
+send INSERT INTO t6 VALUES (62);
+
+--connection server_1
+SET debug_sync='now WAIT_FOR master_queued2';
+SET debug_sync='now SIGNAL master_cont1';
+
+--connection con_temp3
+REAP;
+--connection con_temp4
+REAP;
+
+--connection server_1
+SET debug_sync='RESET';
+SET BINLOG_FORMAT= @old_format;
+SELECT * FROM t2 WHERE a>=60 ORDER BY a;
+SELECT * FROM t1 WHERE a>=60 ORDER BY a;
+SELECT * FROM t6 WHERE a>=60 ORDER BY a;
+
+--connection server_2
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready1";
+# T2 has now started running, but has not yet done mark_start_commit()
+SET DEBUG_SYNC= "now SIGNAL t1_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t1_ready2";
+# T1 has now done unmark_start_commit() in preparation for its retry.
+
+--connection server_2
+# Let the preparation transaction complete, so that the same worker thread
+# can continue with the transaction T3.
+SET DEBUG_SYNC= "now SIGNAL prep_cont";
+SET DEBUG_SYNC= "now WAIT_FOR t3_ready";
+# T3 has now gone to wait for T2 to start committing
+SET DEBUG_SYNC= "now SIGNAL t2_cont1";
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready2";
+# T2 has now done mark_start_commit().
+# Let things run, and check that T3 does not get deadlocked.
+SET DEBUG_SYNC= "now SIGNAL t1_cont2";
+--sync_with_master
+
+--connection server_1
+--save_master_pos
+--connection server_2
+--sync_with_master
+SELECT * FROM t2 WHERE a>=60 ORDER BY a;
+SELECT * FROM t1 WHERE a>=60 ORDER BY a;
+SELECT * FROM t6 WHERE a>=60 ORDER BY a;
+SET DEBUG_SYNC="reset";
+
+# Re-spawn the worker threads to remove any DBUG injections or DEBUG_SYNC.
+--source include/stop_slave.inc
+SET GLOBAL debug_dbug=@old_dbug;
+SET GLOBAL slave_parallel_threads=0;
+SET GLOBAL slave_parallel_threads=10;
+--source include/start_slave.inc
+
+
+# Clean up.
--connection server_2
--source include/stop_slave.inc
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
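
Both MDEV-7326 scenarios above come down to the same wakeup-count race: a waiting transaction compares count_committing_event_groups against its wait_count with strict equality, so if the counter overshoots the target (or a signal is skipped because an earlier transaction had temporarily unmarked itself), the waiter sleeps forever. The following stand-alone C sketch only illustrates that failure mode and the usual remedy of broadcasting on every increment and waiting on a >= condition; the names and types are invented for illustration and are not the server's actual replication code.

    /* Minimal sketch (invented names) of the wakeup-count race described above. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mtx  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int committing_count= 0;  /* stand-in for count_committing_event_groups */

    static void mark_start_commit(void)
    {
      pthread_mutex_lock(&mtx);
      committing_count++;
      /*
        The buggy pattern signalled only when the counter landed exactly on a
        waiter's target, so an overshoot (or a re-increment after an unmark)
        produced no wakeup.  Broadcasting on every increment, combined with
        the >= wait below, avoids that.
      */
      pthread_cond_broadcast(&cond);
      pthread_mutex_unlock(&mtx);
    }

    static void wait_for_prior_commit(int wait_count)
    {
      pthread_mutex_lock(&mtx);
      while (committing_count < wait_count)   /* >= semantics, not == */
        pthread_cond_wait(&cond, &mtx);
      pthread_mutex_unlock(&mtx);
    }

    int main(void)
    {
      mark_start_commit();        /* e.g. T2 */
      mark_start_commit();        /* e.g. T1 after its retry */
      wait_for_prior_commit(2);   /* e.g. T3: released instead of deadlocked */
      puts("waiter released");
      return 0;
    }
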
diff --git a/mysql-test/suite/rpl/t/rpl_parallel2.test b/mysql-test/suite/rpl/t/rpl_parallel2.test
index 39b35063c7c..51c9e39a26a 100644
--- a/mysql-test/suite/rpl/t/rpl_parallel2.test
+++ b/mysql-test/suite/rpl/t/rpl_parallel2.test
@@ -25,6 +25,13 @@ INSERT INTO t1 VALUES (1,sleep(2));
--connection server_2
--sync_with_master
+# The slave position (which --sync_with_master waits for) is updated just
+# before the Seconds_Behind_Master value. So we have to wait for the zero status
+# to appear, otherwise there is a small window between --sync_with_master
+# and SHOW SLAVE STATUS where we can see a non-zero value.
+--let $slave_param= Seconds_Behind_Master
+--let $slave_param_value= 0
+--source include/wait_for_slave_param.inc
--echo Seconds_Behind_Master should be zero here because the slave is fully caught up and idle.
--let $status_items= Seconds_Behind_Master
--source include/show_slave_status.inc
diff --git a/mysql-test/suite/sys_vars/r/sql_log_bin_basic.result b/mysql-test/suite/sys_vars/r/sql_log_bin_basic.result
index 5e8fe4e02f7..909c434340c 100644
--- a/mysql-test/suite/sys_vars/r/sql_log_bin_basic.result
+++ b/mysql-test/suite/sys_vars/r/sql_log_bin_basic.result
@@ -57,11 +57,16 @@ ERROR 42000: Variable 'sql_log_bin' can't be set to the value of '¹'
SET @@session.sql_log_bin = NO;
ERROR 42000: Variable 'sql_log_bin' can't be set to the value of 'NO'
'#-------------------FN_DYNVARS_156_05----------------------------#'
+SELECT @@global.sql_log_bin;
+@@global.sql_log_bin
+1
SET @@global.sql_log_bin = 0;
+ERROR HY000: Variable 'sql_log_bin' is a SESSION variable
SELECT @@global.sql_log_bin;
@@global.sql_log_bin
-0
+1
SET @@global.sql_log_bin = 1;
+ERROR HY000: Variable 'sql_log_bin' is a SESSION variable
'#----------------------FN_DYNVARS_156_06------------------------#'
SELECT count(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='sql_log_bin';
count(VARIABLE_VALUE)
diff --git a/mysql-test/suite/sys_vars/r/stored_program_cache_basic.result b/mysql-test/suite/sys_vars/r/stored_program_cache_basic.result
index f1638520f72..7f882255567 100644
--- a/mysql-test/suite/sys_vars/r/stored_program_cache_basic.result
+++ b/mysql-test/suite/sys_vars/r/stored_program_cache_basic.result
@@ -23,7 +23,7 @@ Warnings:
Warning 1292 Truncated incorrect stored_program_cache value: '-1'
SELECT @@global.stored_program_cache;
@@global.stored_program_cache
-256
+0
SET @@global.stored_program_cache =100000000000;
Warnings:
Warning 1292 Truncated incorrect stored_program_cache value: '100000000000'
@@ -31,11 +31,9 @@ SELECT @@global.stored_program_cache;
@@global.stored_program_cache
524288
SET @@global.stored_program_cache = 0;
-Warnings:
-Warning 1292 Truncated incorrect stored_program_cache value: '0'
SELECT @@global.stored_program_cache;
@@global.stored_program_cache
-256
+0
SET @@global.stored_program_cache = 10000.01;
ERROR 42000: Incorrect argument type to variable 'stored_program_cache'
SET @@global.stored_program_cache = ON;
diff --git a/mysql-test/suite/sys_vars/r/stored_program_cache_func.result b/mysql-test/suite/sys_vars/r/stored_program_cache_func.result
new file mode 100644
index 00000000000..11151847d6b
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/stored_program_cache_func.result
@@ -0,0 +1,52 @@
+create procedure p1() select 1;
+flush status;
+show status like 'handler_read_key';
+Variable_name Value
+Handler_read_key 0
+call p1;
+1
+1
+show status like 'handler_read_key';
+Variable_name Value
+Handler_read_key 1
+call p1;
+1
+1
+show status like 'handler_read_key';
+Variable_name Value
+Handler_read_key 1
+set global stored_program_cache=0;
+call p1;
+1
+1
+show status like 'handler_read_key';
+Variable_name Value
+Handler_read_key 2
+call p1;
+1
+1
+show status like 'handler_read_key';
+Variable_name Value
+Handler_read_key 3
+drop procedure p1;
+set global stored_program_cache=default;
+create procedure pr(i int) begin
+create table t1 (a int, b int);
+if (i = 1) then alter table t1 drop a;
+else alter table t1 drop b;
+end if;
+select * from t1;
+drop table t1;
+end |
+call pr(1);
+b
+call pr(2);
+ERROR 42S22: Unknown column 'test.t1.b' in 'field list'
+drop table t1;
+set global stored_program_cache=0;
+call pr(1);
+b
+call pr(2);
+a
+drop procedure pr;
+set global stored_program_cache=default;
diff --git a/mysql-test/suite/sys_vars/t/sql_log_bin_basic.test b/mysql-test/suite/sys_vars/t/sql_log_bin_basic.test
index 153a0be0b8f..5f5e3920862 100644
--- a/mysql-test/suite/sys_vars/t/sql_log_bin_basic.test
+++ b/mysql-test/suite/sys_vars/t/sql_log_bin_basic.test
@@ -109,11 +109,15 @@ SET @@session.sql_log_bin = NO;
--echo '#-------------------FN_DYNVARS_156_05----------------------------#'
###########################################################################
-# Test if accessing global sql_log_bin gives error #
+# Test if setting global sql_log_bin gives error, #
+# and there is no error on reading it. #
###########################################################################
+SELECT @@global.sql_log_bin;
+--Error ER_INCORRECT_GLOBAL_LOCAL_VAR
SET @@global.sql_log_bin = 0;
SELECT @@global.sql_log_bin;
+--Error ER_INCORRECT_GLOBAL_LOCAL_VAR
SET @@global.sql_log_bin = 1;
--echo '#----------------------FN_DYNVARS_156_06------------------------#'
diff --git a/mysql-test/suite/sys_vars/t/stored_program_cache_func.test b/mysql-test/suite/sys_vars/t/stored_program_cache_func.test
new file mode 100644
index 00000000000..f85fc8eb1bf
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/stored_program_cache_func.test
@@ -0,0 +1,43 @@
+create procedure p1() select 1;
+
+flush status;
+show status like 'handler_read_key';
+call p1;
+show status like 'handler_read_key';
+call p1;
+show status like 'handler_read_key';
+
+set global stored_program_cache=0;
+
+call p1;
+show status like 'handler_read_key';
+call p1;
+show status like 'handler_read_key';
+
+drop procedure p1;
+set global stored_program_cache=default;
+
+# Test for missing automatic reparsing of stored programs.
+# When MDEV-5816 is implemented, this test should be removed.
+
+--delimiter |
+create procedure pr(i int) begin
+ create table t1 (a int, b int);
+ if (i = 1) then alter table t1 drop a;
+ else alter table t1 drop b;
+ end if;
+ select * from t1;
+ drop table t1;
+end |
+--delimiter ;
+call pr(1);
+--error ER_BAD_FIELD_ERROR
+call pr(2);
+drop table t1;
+
+set global stored_program_cache=0;
+call pr(1);
+call pr(2);
+drop procedure pr;
+set global stored_program_cache=default;
+
diff --git a/mysql-test/t/change_user_notembedded.test b/mysql-test/t/change_user_notembedded.test
index bf5d1956cd5..19421c6dd33 100644
--- a/mysql-test/t/change_user_notembedded.test
+++ b/mysql-test/t/change_user_notembedded.test
@@ -22,3 +22,5 @@ change_user;
disconnect test;
connection default;
+--echo that's all
+
diff --git a/mysql-test/t/create_or_replace.test b/mysql-test/t/create_or_replace.test
index 2bdd23c21f6..9e37950dbef 100644
--- a/mysql-test/t/create_or_replace.test
+++ b/mysql-test/t/create_or_replace.test
@@ -346,20 +346,26 @@ LOCK TABLE t1 WRITE;
--let $con_id = `SELECT CONNECTION_ID()`
--send CREATE OR REPLACE TABLE t1 LIKE tmp
--connection default
+let $wait_condition= SELECT COUNT(*)=1 FROM information_schema.processlist
+ WHERE state= 'Waiting for table metadata lock';
+--source include/wait_condition.inc
--replace_result $con_id con_id
--eval KILL QUERY $con_id
--connection con1
---error 0,ER_QUERY_INTERRUPTED
+--error ER_QUERY_INTERRUPTED
--reap
--send CREATE OR REPLACE TABLE t1 (a int)
--connection default
+let $wait_condition= SELECT COUNT(*)=1 FROM information_schema.processlist
+ WHERE state= 'Waiting for table metadata lock';
+--source include/wait_condition.inc
--replace_result $con_id con_id
--eval KILL QUERY $con_id
--connection con1
---error 0,ER_QUERY_INTERRUPTED
+--error ER_QUERY_INTERRUPTED
--reap
--disconnect con1
--connection default
diff --git a/mysql-test/t/ctype_binary.test b/mysql-test/t/ctype_binary.test
index 8b57854de83..4a2646d1db5 100644
--- a/mysql-test/t/ctype_binary.test
+++ b/mysql-test/t/ctype_binary.test
@@ -10,3 +10,15 @@ set names binary;
--echo #
--echo # End of 5.5 tests
--echo #
+
+
+--echo #
+--echo # Start of 10.0 tests
+--echo #
+
+SET NAMES binary;
+--source include/ctype_like_cond_propagation.inc
+
+--echo #
+--echo # End of 10.0 tests
+--echo #
diff --git a/mysql-test/t/ctype_latin1.test b/mysql-test/t/ctype_latin1.test
index 5da1534029b..336d8ca761d 100644
--- a/mysql-test/t/ctype_latin1.test
+++ b/mysql-test/t/ctype_latin1.test
@@ -210,6 +210,13 @@ set names latin1;
let $ctype_unescape_combinations=selected;
--source include/ctype_unescape.inc
+SET NAMES latin1;
+--source include/ctype_like_cond_propagation.inc
+
+SET NAMES latin1 COLLATE latin1_bin;
+--source include/ctype_like_cond_propagation.inc
+
+
--echo #
--echo # MDEV-6752 Trailing incomplete characters are not replaced to question marks on conversion
--echo #
diff --git a/mysql-test/t/ctype_uca.test b/mysql-test/t/ctype_uca.test
index 79f163d7e5d..95008d83a38 100644
--- a/mysql-test/t/ctype_uca.test
+++ b/mysql-test/t/ctype_uca.test
@@ -571,6 +571,14 @@ SET NAMES utf8mb4 COLLATE utf8mb4_unicode_520_ci;
--echo #
--echo
+SET NAMES utf8 COLLATE utf8_unicode_ci;
+--source include/ctype_like_cond_propagation.inc
+--source include/ctype_like_cond_propagation_utf8_german.inc
+
+SET NAMES utf8 COLLATE utf8_german2_ci;
+--source include/ctype_like_cond_propagation.inc
+--source include/ctype_like_cond_propagation_utf8_german.inc
+
--echo #
--echo # MDEV-4929 Myanmar collation
--echo #
@@ -580,5 +588,24 @@ SET collation_connection=ucs2_myanmar_ci;
--source include/ctype_myanmar.inc
--echo #
+--echo # MDEV-7366 SELECT 'a' = BINARY 'A' returns 1 (utf8 charset, utf8_unicode_ci collation)
+--echo #
+SET NAMES utf8 COLLATE utf8_unicode_ci;
+SELECT 'a' = BINARY 'A';
+SELECT BINARY 'A' = 'a';
+
+--echo #
+--echo # Wrong result set for WHERE a='oe' COLLATE utf8_german2_ci AND a='oe'
+--echo #
+SET NAMES utf8 COLLATE utf8_german2_ci;
+CREATE TABLE t1 (a CHAR(10) CHARACTER SET utf8);
+INSERT INTO t1 VALUES ('ö'),('oe');
+SELECT * FROM t1 WHERE a='oe' AND a='oe' COLLATE utf8_german2_ci;
+SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci AND a='oe';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='oe' AND a='oe' COLLATE utf8_german2_ci;
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci AND a='oe';
+DROP TABLE t1;
+
+--echo #
--echo # End of MariaDB-10.0 tests
--echo #
diff --git a/mysql-test/t/ctype_ucs.test b/mysql-test/t/ctype_ucs.test
index d269fb35dfe..33d41e9bc4c 100644
--- a/mysql-test/t/ctype_ucs.test
+++ b/mysql-test/t/ctype_ucs.test
@@ -897,6 +897,12 @@ DROP TABLE t1;
--echo # Start of 10.0 tests
--echo #
+SET NAMES latin1, collation_connection=ucs2_bin;
+--source include/ctype_like_cond_propagation.inc
+SET NAMES latin1, collation_connection=ucs2_general_ci;
+--source include/ctype_like_cond_propagation.inc
+SET NAMES latin1;
+
--echo #
--echo # MDEV-6661 PI() does not work well in UCS2/UTF16/UTF32 context
--echo #
diff --git a/mysql-test/t/ctype_utf8.test b/mysql-test/t/ctype_utf8.test
index e02d5a915b7..1e9047cca8e 100644
--- a/mysql-test/t/ctype_utf8.test
+++ b/mysql-test/t/ctype_utf8.test
@@ -1658,6 +1658,13 @@ set max_sort_length=default;
--echo # Start of 10.0 tests
--echo #
+SET NAMES utf8 COLLATE utf8_bin;
+--source include/ctype_like_cond_propagation.inc
+SET NAMES utf8;
+--source include/ctype_like_cond_propagation.inc
+--source include/ctype_like_cond_propagation_utf8_german.inc
+
+
--echo #
--echo # MDEV-6666 Malformed result for CONCAT(utf8_column, binary_string)
--echo #
diff --git a/mysql-test/t/frm_bad_row_type-7333.test b/mysql-test/t/frm_bad_row_type-7333.test
new file mode 100644
index 00000000000..5100a85cb22
--- /dev/null
+++ b/mysql-test/t/frm_bad_row_type-7333.test
@@ -0,0 +1,14 @@
+#
+# MDEV-7333 "'show table status like 'table_name'" on tokudb table lead to MariaDB crash
+#
+let $datadir= `select @@datadir`;
+call mtr.add_suppression("bad_row_type.frm: invalid value 11 for the field row_format");
+copy_file std_data/bad_row_type.MYD $datadir/test/bad_row_type.MYD;
+copy_file std_data/bad_row_type.MYI $datadir/test/bad_row_type.MYI;
+copy_file std_data/bad_row_type.frm $datadir/test/bad_row_type.frm;
+
+select * from bad_row_type;
+show create table bad_row_type;
+replace_column 12 x 13 x;
+show table status like 'bad_row_type';
+drop table bad_row_type;
diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test
index a3f488a8d1e..2b189765bbc 100644
--- a/mysql-test/t/func_time.test
+++ b/mysql-test/t/func_time.test
@@ -1631,3 +1631,21 @@ CREATE TABLE t1 ( d DATE, t TIME );
INSERT INTO t1 VALUES ('2008-12-05','22:34:09'),('2005-03-27','14:26:02');
SELECT EXTRACT(DAY_MINUTE FROM GREATEST(t,d)), GREATEST(t,d) FROM t1;
DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-7221 from_days fails after null value
+--echo #
+CREATE TABLE t1 (
+ id INT(11) NOT NULL PRIMARY KEY,
+ date1 DATE NULL DEFAULT NULL
+);
+INSERT INTO t1 VALUES (12, '2011-05-12');
+INSERT INTO t1 VALUES (13, NULL);
+INSERT INTO t1 VALUES (14, '2009-10-23');
+INSERT INTO t1 VALUES (15, '2014-10-30');
+INSERT INTO t1 VALUES (16, NULL);
+INSERT INTO t1 VALUES (17, NULL);
+INSERT INTO t1 VALUES (18, '2010-10-13');
+SELECT a.id,a.date1,FROM_DAYS(TO_DAYS(a.date1)-10) as date2, DATE_ADD(a.date1,INTERVAL -10 DAY),TO_DAYS(a.date1)-10 FROM t1 a ORDER BY a.id;
+DROP TABLE t1;
diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test
index 35bd447e9ea..4b1cb82d0f9 100644
--- a/mysql-test/t/group_by.test
+++ b/mysql-test/t/group_by.test
@@ -1664,6 +1664,21 @@ WHERE t1a.c1 = c2 GROUP BY i2;
DROP TABLE t1,t2;
+--echo #
+--echo # MDEV-6855
+--echo # MIN(*) with subqueries with IS NOT NULL in WHERE clause crashed.
+--echo #
+
+CREATE TABLE t1 (i INT, c VARCHAR(3), KEY(c,i)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (7,'foo'),(0,'bar');
+
+CREATE TABLE t2 (j INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (0),(8),(1),(8),(9);
+
+SELECT MAX(i), c FROM t1
+WHERE c != 'qux' AND ( SELECT SUM(j) FROM t1, t2 ) IS NOT NULL GROUP BY c;
+drop table t1,t2;
+
#
# End of MariaDB 5.5 tests
#
diff --git a/mysql-test/t/group_by_innodb.test b/mysql-test/t/group_by_innodb.test
index df213cc189f..75ee3d0802a 100644
--- a/mysql-test/t/group_by_innodb.test
+++ b/mysql-test/t/group_by_innodb.test
@@ -67,3 +67,22 @@ DROP TABLE t1;
--echo End of 5.5 tests
+--echo #
+--echo # MDEV-5719: Wrong result with GROUP BY and LEFT OUTER JOIN
+--echo #
+CREATE TABLE t1 (oidGroup INT, oid INT PRIMARY KEY)ENGINE=INNODB;
+INSERT INTO t1 VALUES (1,1),(1,2),(1,3),(1,4);
+
+CREATE TABLE t2 (oid INT PRIMARY KEY)ENGINE=INNODB;
+INSERT INTO t2 VALUES (3);
+
+# Returns a value
+SELECT a.oidGroup, a.oid, b.oid FROM t1 a LEFT JOIN t2 b ON
+a.oid=b.oid WHERE a.oidGroup=1;
+
+SELECT a.oidGroup, a.oid, b.oid FROM t1 a LEFT JOIN t2 b ON
+a.oid=b.oid WHERE a.oidGroup=1 GROUP BY a.oid;
+
+DROP TABLE t1, t2;
+
+--echo # End of tests
diff --git a/mysql-test/t/insert_update_autoinc-7150.test b/mysql-test/t/insert_update_autoinc-7150.test
new file mode 100644
index 00000000000..1229898b4aa
--- /dev/null
+++ b/mysql-test/t/insert_update_autoinc-7150.test
@@ -0,0 +1,8 @@
+#
+# MDEV-7150 Wrong auto increment values on INSERT .. ON DUPLICATE KEY UPDATE when the inserted columns include NULL in an auto-increment column
+#
+create table t1 (a int(10) auto_increment primary key, b int(11));
+insert t1 values (null,1);
+insert t1 values (null,2), (1,-1), (null,3) on duplicate key update b=values(b);
+select * from t1;
+drop table t1;
diff --git a/mysql-test/t/key_cache.test b/mysql-test/t/key_cache.test
index 86e56a8301b..d28e7c23a4c 100644
--- a/mysql-test/t/key_cache.test
+++ b/mysql-test/t/key_cache.test
@@ -301,8 +301,8 @@ select * from t2;
update t1 set p=3 where p=1;
update t2 set i=2 where i=1;
---replace_result 1804 KEY_BLOCKS_UNUSED 1801 KEY_BLOCKS_UNUSED 1663 KEY_BLOCKS_UNUSED 1782 KEY_BLOCKS_UNUSED
-show status like 'key_%';
+select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
+select variable_value into @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
--replace_column 7 #
select * from information_schema.key_caches where segment_number is null;
@@ -334,8 +334,8 @@ update t1 set p=3 where p=1;
update t2 set i=2 where i=1;
---replace_result 1800 KEY_BLOCKS_UNUSED 1794 KEY_BLOCKS_UNUSED 1656 KEY_BLOCKS_UNUSED 1775 KEY_BLOCKS_UNUSED
-show status like 'key_%';
+select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
+select variable_value < @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
--replace_column 7 #
select * from information_schema.key_caches where segment_number is null;
@@ -359,8 +359,8 @@ select * from t2;
update t1 set p=3 where p=1;
update t2 set i=2 where i=1;
---replace_result 1804 KEY_BLOCKS_UNUSED 1801 KEY_BLOCKS_UNUSED 1663 KEY_BLOCKS_UNUSED 1782 KEY_BLOCKS_UNUSED
-show status like 'key_%';
+select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
+select variable_value = @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
--replace_column 7 #
select * from information_schema.key_caches where segment_number is null;
diff --git a/mysql-test/t/kill-2-master.opt b/mysql-test/t/kill-2-master.opt
new file mode 100644
index 00000000000..ab6ca1731f5
--- /dev/null
+++ b/mysql-test/t/kill-2-master.opt
@@ -0,0 +1 @@
+--skip-name-resolve
diff --git a/mysql-test/t/kill-2.test b/mysql-test/t/kill-2.test
new file mode 100644
index 00000000000..0c1177722b4
--- /dev/null
+++ b/mysql-test/t/kill-2.test
@@ -0,0 +1,29 @@
+#
+# Test KILL and KILL QUERY statements.
+#
+# Killing a connection in an embedded server does not work like in a normal
+# server, if it is waiting for a new statement. In an embedded server, the
+# connection does not read() from a socket, but returns control to the
+# application. 'mysqltest' does not handle the kill request.
+#
+
+-- source include/not_embedded.inc
+-- source include/not_threadpool.inc
+
+--echo #
+--echo # MDEV-6896 kill user command cause MariaDB crash!
+--echo #
+
+create user foo@'127.0.0.1';
+
+--connect (con1,127.0.0.1,foo,,)
+
+--connection default
+select user from information_schema.processlist;
+kill user foo@'127.0.0.1';
+
+let $wait_condition=
+ select count(*) = 0 from information_schema.processlist
+ where user = "foo";
+--source include/wait_condition.inc
+drop user foo@'127.0.0.1';
diff --git a/mysql-test/t/kill_processlist-6619.test b/mysql-test/t/kill_processlist-6619.test
index 2333f02eac6..95af83be56d 100644
--- a/mysql-test/t/kill_processlist-6619.test
+++ b/mysql-test/t/kill_processlist-6619.test
@@ -2,16 +2,26 @@
# MDEV-6619 SHOW PROCESSLIST returns empty result set after KILL QUERY
#
--source include/not_embedded.inc
+--source include/have_debug_sync.inc
+
--enable_connect_log
--connect (con1,localhost,root,,)
--let $con_id = `SELECT CONNECTION_ID()`
--replace_column 1 # 3 # 6 # 7 #
SHOW PROCESSLIST;
+SET DEBUG_SYNC='before_execute_sql_command SIGNAL ready WAIT_FOR go';
+send SHOW PROCESSLIST;
--connection default
+# We must wait for the SHOW PROCESSLIST query to have started before sending
+# the kill. Otherwise, the KILL may be lost since it is reset at the start of
+# query execution.
+SET DEBUG_SYNC='now WAIT_FOR ready';
--replace_result $con_id con_id
eval KILL QUERY $con_id;
+SET DEBUG_SYNC='now SIGNAL go';
--connection con1
--error ER_QUERY_INTERRUPTED
-SHOW PROCESSLIST;
+reap;
+SET DEBUG_SYNC='reset';
--replace_column 1 # 3 # 6 # 7 #
SHOW PROCESSLIST;
diff --git a/mysql-test/t/kill_query-6728.test b/mysql-test/t/kill_query-6728.test
new file mode 100644
index 00000000000..485256a65b6
--- /dev/null
+++ b/mysql-test/t/kill_query-6728.test
@@ -0,0 +1,14 @@
+#
+# MDEV-6728 KILL QUERY executed on an idle connection can interrupt the next query
+#
+--enable_connect_log
+--connect (con1,localhost,root,,)
+let $id=`select connection_id()`;
+
+--connection default
+--replace_result $id id
+eval kill query $id;
+
+--connection con1
+select count(*) > 0 from mysql.user;
+
diff --git a/mysql-test/t/lock_sync.test b/mysql-test/t/lock_sync.test
index f00080d917b..ef79cc2c0f4 100644
--- a/mysql-test/t/lock_sync.test
+++ b/mysql-test/t/lock_sync.test
@@ -1186,7 +1186,6 @@ DROP TABLE t1;
disconnect con1;
disconnect con2;
-
# Check that all connections opened by test cases in this file are really
# gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc
diff --git a/mysql-test/t/log_tables.test b/mysql-test/t/log_tables.test
index 8a2bd4cf6c1..6fd26ab2011 100644
--- a/mysql-test/t/log_tables.test
+++ b/mysql-test/t/log_tables.test
@@ -727,7 +727,8 @@ CREATE TABLE `db_17876.slow_log_data` (
`last_insert_id` int(11) default NULL,
`insert_id` int(11) default NULL,
`server_id` int(11) default NULL,
- `sql_text` mediumtext
+ `sql_text` mediumtext,
+ `thread_id` bigint(21) unsigned default NULL
);
CREATE TABLE `db_17876.general_log_data` (
diff --git a/mysql-test/t/mdev6830-master.opt b/mysql-test/t/mdev6830-master.opt
new file mode 100644
index 00000000000..2a8c27d4731
--- /dev/null
+++ b/mysql-test/t/mdev6830-master.opt
@@ -0,0 +1 @@
+--debug
diff --git a/mysql-test/t/mdev6830.test b/mysql-test/t/mdev6830.test
new file mode 100644
index 00000000000..24565d04fed
--- /dev/null
+++ b/mysql-test/t/mdev6830.test
@@ -0,0 +1,63 @@
+
+--source include/have_debug.inc
+
+--disable_warnings
+drop table if exists t1,t2,t3;
+drop view if exists v2,v3;
+--enable_warnings
+CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=MyISAM;
+
+CREATE TABLE t2 (
+ f1 DATE,
+ f2 VARCHAR(1024),
+ f3 VARCHAR(10),
+ f4 DATE,
+ f5 VARCHAR(10),
+ f6 VARCHAR(10),
+ f7 VARCHAR(10),
+ f8 DATETIME,
+ f9 INT,
+ f10 VARCHAR(1024),
+ f11 VARCHAR(1024),
+ f12 INT,
+ f13 VARCHAR(1024)
+) ENGINE=MyISAM;
+
+CREATE OR REPLACE VIEW v2 AS SELECT * FROM t2;
+
+CREATE TABLE t3 (
+ f1 VARCHAR(1024),
+ f2 VARCHAR(1024),
+ f3 DATETIME,
+ f4 VARCHAR(10),
+ f5 INT,
+ f6 VARCHAR(10),
+ f7 VARCHAR(1024),
+ f8 VARCHAR(10),
+ f9 INT,
+ f10 DATE,
+ f11 INT,
+ f12 VARCHAR(1024),
+ f13 VARCHAR(10),
+ f14 DATE,
+ f15 DATETIME
+) ENGINE=MyISAM;
+
+CREATE OR REPLACE ALGORITHM=TEMPTABLE VIEW v3 AS SELECT * FROM t3;
+
+INSERT INTO t3 VALUES
+ ('FOO','foo','2000-08-04 00:00:00','one',1,'1','FOO','foo',1,'2004-05-09',1,'one','one','2001-12-07','2001-10-17 08:25:04'),
+ ('BAR','bar','2001-01-01 04:52:37','two',2,'2','BAR','bar',2,'2008-01-01',2,'two','two','2006-06-19','2002-01-01 08:22:49');
+
+CREATE TABLE t4 (f1 VARCHAR(10), f2 INT) ENGINE=MyISAM;
+
+SELECT * FROM t1;
+
+--error ER_BAD_FIELD_ERROR
+SELECT non_existing FROM v2;
+
+SELECT * FROM t1, v3, t4 WHERE v3.f1 = t4.f1 AND t4.f2 = 6 AND t1.pk = v3.f5;
+
+drop table t1,t2,t3,t4;
+drop view v2,v3;
+
diff --git a/mysql-test/t/partition_innodb.test b/mysql-test/t/partition_innodb.test
index 1e2aacd474a..a74e95ab65b 100644
--- a/mysql-test/t/partition_innodb.test
+++ b/mysql-test/t/partition_innodb.test
@@ -581,7 +581,7 @@ CREATE INDEX i1 ON t1 (a);
DROP TABLE t1;
# Before the fix it should show extra file like #sql-2405_2.par
---list_files $MYSQLD_DATADIR/test/ *
+--list_files $MYSQLD_DATADIR/test/ *.par
--disable_parsing
--echo #
@@ -616,7 +616,7 @@ ALTER TABLE t1 REORGANIZE PARTITION pMAX INTO
SHOW WARNINGS;
#Contents of the 'test' database directory:
---list_files $MYSQLD_DATADIR/test
+--list_files $MYSQLD_DATADIR/test/ *.par
disconnect con1;
connection default;
diff --git a/mysql-test/t/partition_innodb_plugin.test b/mysql-test/t/partition_innodb_plugin.test
index 2eb9a2fa2a0..8044ae9ec5c 100644
--- a/mysql-test/t/partition_innodb_plugin.test
+++ b/mysql-test/t/partition_innodb_plugin.test
@@ -52,7 +52,7 @@ KEY_BLOCK_SIZE=4
PARTITION BY HASH(id) PARTITIONS 1;
--replace_result #p# #P#
---list_files $MYSQLD_DATADIR/test
+--list_files $MYSQLD_DATADIR/test t1*
SHOW CREATE TABLE t1;
SET GLOBAL innodb_file_per_table = OFF;
@@ -71,14 +71,14 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 ADD PARTITION PARTITIONS 1;
--replace_result #p# #P#
---list_files $MYSQLD_DATADIR/test
+--list_files $MYSQLD_DATADIR/test t1*
--echo # This SET is not needed to reproduce the bug,
--echo # it is here just to make the test case more realistic
SET innodb_strict_mode = OFF;
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
--replace_result #p# #P#
---list_files $MYSQLD_DATADIR/test
+--list_files $MYSQLD_DATADIR/test t1*
# really bug#56172
ALTER TABLE t1 REBUILD PARTITION p0;
diff --git a/mysql-test/t/processlist.test b/mysql-test/t/processlist.test
index 9c555c0f9fb..a8f8a4ed64c 100644
--- a/mysql-test/t/processlist.test
+++ b/mysql-test/t/processlist.test
@@ -2,6 +2,7 @@
# MDEV-4578 information_schema.processlist reports incorrect value for Time (2147483647)
#
+source include/have_debug.inc;
source include/have_debug_sync.inc;
let $tid= `SELECT CONNECTION_ID()`;
@@ -21,6 +22,7 @@ SET DEBUG_SYNC = 'now SIGNAL fill_schema_proceed';
connection con1;
--replace_result $tid TID
reap;
+set debug_sync='reset';
connection default;
#
@@ -28,15 +30,13 @@ connection default;
#
connection con1;
-# Trigger a signal once the thread has gone from "Query" to "Sleep" command
-# state. Note we need to execute this twice: Once at the end of SET DEBUG_SYNC,
-# and once for the intended time, at the end of SELECT SLEEP().
-SET DEBUG_SYNC = 'dispatch_command_end SIGNAL query_done EXECUTE 2';
-connection default;
-# Wait for and clear the first signal set during SET DEBUG_SYNC.
-SET DEBUG_SYNC= 'now WAIT_FOR query_done';
-SET DEBUG_SYNC= 'now SIGNAL nosignal';
-connection con1;
+# This DBUG insertion triggers a DEBUG_SYNC signal "query_done" once
+# the below SELECT SLEEP(5) has gone from "Query" to "Sleep" command
+# state. (We cannot just set the DEBUG_SYNC directly here, because
+# then it can trigger at the end of the SET DEBUG_SYNC statement (or
+# at the end of the Prepare step of the SELECT, if --ps-protocol),
+# thus occurring too early).
+SET debug_dbug="+d,sleep_inject_query_done_debug_sync";
select sleep(5); #run a query that will take some time
connection default;
diff --git a/mysql-test/t/select_found.test b/mysql-test/t/select_found.test
index d529dc415e7..88940eaf2b8 100644
--- a/mysql-test/t/select_found.test
+++ b/mysql-test/t/select_found.test
@@ -257,3 +257,23 @@ select sql_calc_found_rows 1 as res from t1 left join t2 on i1 = i2 where v2 = 5
select found_rows() as count;
drop table t1, t2;
+#
+# MDEV-7219 SQL_CALC_FOUND_ROWS yields wrong result
+#
+create table t1 (i int, v varchar(64), key (i));
+
+--disable_query_log
+let $1=150;
+while ($1)
+{
+ eval insert into t1 values ($1 % 2, 'foo');
+ dec $1;
+}
+--enable_query_log
+
+select sql_calc_found_rows * from t1 where i = 0 order by v limit 59,2;
+select found_rows();
+select sql_calc_found_rows * from t1 ignore index (i) where i = 0 order by v limit 59,2;
+select found_rows();
+drop table t1;
+
diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test
index 14d50709921..c43193d1b57 100644
--- a/mysql-test/t/show_check.test
+++ b/mysql-test/t/show_check.test
@@ -1210,33 +1210,8 @@ disconnect conn1;
connection default;
DROP USER test_u@localhost;
-
---echo #
---echo # Bug #48985: show create table crashes if previous access to the table
---echo # was killed
---echo #
-
-connect(con1,localhost,root,,);
-CONNECTION con1;
-LET $ID= `SELECT connection_id()`;
-
-CONNECTION default;
---disable_query_log
-eval KILL QUERY $ID;
---enable_query_log
-
-CONNECTION con1;
---error ER_QUERY_INTERRUPTED
-SHOW CREATE TABLE non_existent;
-
-DISCONNECT con1;
---source include/wait_until_disconnected.inc
-CONNECTION default;
-
-
--echo End of 5.1 tests
-
--echo #
--echo # Bug#52593 SHOW CREATE TABLE is blocked if table is locked
--echo # for write by another connection
diff --git a/mysql-test/t/sp-innodb.test b/mysql-test/t/sp-innodb.test
new file mode 100644
index 00000000000..228ab42544d
--- /dev/null
+++ b/mysql-test/t/sp-innodb.test
@@ -0,0 +1,45 @@
+
+--source include/have_innodb.inc
+
+--disable_warnings
+drop table if exists t1,t2;
+drop procedure if exists p1;
+--enable_warnings
+
+--echo #
+--echo #MDEV-6985: MariaDB crashes on stored procedure call
+--echo #
+CREATE TABLE `t1` (
+ `ID` int(11) NOT NULL,
+ PRIMARY KEY (`ID`)
+) ENGINE=InnoDB;
+
+CREATE TABLE `t2` (
+ `ID` int(11) NOT NULL,
+ `DATE` datetime DEFAULT NULL,
+ PRIMARY KEY (`ID`)
+) ENGINE=InnoDB;
+
+--delimiter ;;
+
+CREATE PROCEDURE `p1`()
+BEGIN
+ DECLARE _mySelect CURSOR FOR
+ SELECT DISTINCT t1.ID
+ FROM t1
+ LEFT JOIN t2 AS t2 ON
+ t2.ID = t1.ID
+ AND t2.DATE = (
+ SELECT MAX(T3.DATE) FROM t2 AS T3 WHERE T3.ID = t2.ID AND T3.DATE<=NOW()
+ )
+ WHERE t1.ID = 1;
+ OPEN _mySelect;
+ CLOSE _mySelect;
+END ;;
+--delimiter ;
+
+CALL p1();
+CALL p1();
+
+drop procedure p1;
+drop table t1,t2;
diff --git a/mysql-test/t/sp_notembedded.test b/mysql-test/t/sp_notembedded.test
index dee6a7ee8f2..42a3dd193c4 100644
--- a/mysql-test/t/sp_notembedded.test
+++ b/mysql-test/t/sp_notembedded.test
@@ -374,6 +374,9 @@ CREATE VIEW v1 AS SELECT f1('a') FROM t1;
--send SELECT * FROM v1
--connection default
+let $wait_condition=
+ select count(*) = 2 from information_schema.processlist where state = "User sleep";
+--source include/wait_condition.inc
--disable_query_log
--eval KILL QUERY $ID_2
--eval KILL QUERY $ID_1
diff --git a/mysql-test/t/statistics_index_crash-7362.test b/mysql-test/t/statistics_index_crash-7362.test
new file mode 100644
index 00000000000..3873b896dae
--- /dev/null
+++ b/mysql-test/t/statistics_index_crash-7362.test
@@ -0,0 +1,30 @@
+# Test cases that cover the crashes reported in:
+# MDEV-7362 ANALYZE TABLES crash with table-independent-statistics gathering
+# MDEV-7380 engine-independent stats SEGV on ANALYZE TABLE (#2)
+
+--source include/have_stat_tables.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (a longtext, FULLTEXT KEY (`a`)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (unhex('3E0D0A4141414142334E7A6143317963324541414141424977414141674541726D'));
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+--sorted_result
+SELECT * FROM mysql.index_stats WHERE index_name='a' AND table_name='t1';
+DROP TABLE t1;
+
+CREATE TABLE t1 (a longtext, FULLTEXT KEY (`a`)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (unhex('3E0D0A4141414142334E7A6143317963324541414141424977414141674541726D'));
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+--sorted_result
+SELECT * FROM mysql.index_stats WHERE index_name='a' AND table_name='t1';
+DROP TABLE t1;
+
+CREATE TABLE geom (g GEOMETRY NOT NULL, SPATIAL INDEX(g)) ENGINE=MyISAM;
+INSERT INTO geom VALUES
+ (MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+ (MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
+ (MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3)))))));
+ANALYZE TABLE geom PERSISTENT FOR ALL;
+--sorted_result
+SELECT * FROM mysql.index_stats WHERE index_name='g' AND table_name='geom';
+DROP TABLE geom;
diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test
index 1c17743e7f1..77ce8c595ca 100644
--- a/mysql-test/t/type_timestamp.test
+++ b/mysql-test/t/type_timestamp.test
@@ -446,3 +446,50 @@ SELECT MAX(dt) = '2011-01-06 12:34:30' FROM t1;
DROP TABLE t1;
--echo End of 5.5 tests
+
+--echo #
+--echo # MDEV-7254: Assigned expression is evaluated twice when updating column TIMESTAMP NOT NULL
+--echo #
+
+SET time_zone='+02:00';
+create table t1(value timestamp not null);
+set @a:=0;
+delimiter //;
+create function f1 () returns timestamp
+begin
+ set @a = @a + 1;
+ return NULL;
+end//
+delimiter ;//
+set timestamp=12340;
+insert t1 values (f1());
+select @a, value from t1;
+set timestamp=12350;
+update t1 set value = f1();
+select @a, value from t1;
+drop table t1;
+drop function f1;
+set timestamp=0;
+
+# Verify no regressions to TIMESTAMP NULL
+create table t1(value timestamp null);
+set @a:=0;
+delimiter //;
+create function f1 () returns timestamp
+begin
+ set @a = @a + 1;
+ return NULL;
+end//
+delimiter ;//
+set timestamp=12340;
+insert t1 values (f1());
+select @a, value from t1;
+set timestamp=12350;
+update t1 set value = f1();
+select @a, value from t1;
+drop table t1;
+drop function f1;
+set timestamp=0;
+SET time_zone=DEFAULT;
+
+--echo End of 10.0 tests
diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test
index eb905b5c4df..ae78d5504cf 100644
--- a/mysql-test/t/view.test
+++ b/mysql-test/t/view.test
@@ -5335,6 +5335,16 @@ DROP FUNCTION f1;
DROP VIEW v1;
DROP TABLE t1, t2;
+
+create view v1 as select 1;
+
+--let $MYSQLD_DATADIR= `select @@datadir`
+--let SEARCH_FILE= $MYSQLD_DATADIR/test/v1.frm
+--let SEARCH_PATTERN=mariadb-version
+--source include/search_pattern_in_file.inc
+
+drop view v1;
+
--echo # -----------------------------------------------------------------
--echo # -- End of 5.5 tests.
--echo # -----------------------------------------------------------------
diff --git a/mysql-test/t/windows.test b/mysql-test/t/windows.test
index b7d31948d23..617daba6b8e 100644
--- a/mysql-test/t/windows.test
+++ b/mysql-test/t/windows.test
@@ -98,3 +98,19 @@ deallocate prepare abc;
SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
WHERE VARIABLE_NAME = 'socket';
+
+--echo #
+--echo # Bug#16581605: REPLACE.EXE UTILITY IS BROKEN IN 5.5
+--echo #
+
+# Creating a temporary text file.
+--write_file $MYSQL_TMP_DIR/bug16581605.txt
+abc
+def
+EOF
+
+# The REPLACE.EXE utility will work fine after the fix.
+--exec $REPLACE abc xyz < $MYSQL_TMP_DIR/bug16581605.txt
+
+#Cleanup
+remove_file $MYSQL_TMP_DIR/bug16581605.txt;
diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c
index 5505693ce2c..c0cd1594e72 100644
--- a/mysys/mf_keycache.c
+++ b/mysys/mf_keycache.c
@@ -1020,11 +1020,11 @@ void end_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, my_bool cleanup)
*/
static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
- struct st_my_thread_var *thread)
+ struct st_my_thread_var *thread)
{
struct st_my_thread_var *last;
-
DBUG_ASSERT(!thread->next && !thread->prev);
+
if (! (last= wqueue->last_thread))
{
/* Queue is empty */
@@ -1033,10 +1033,15 @@ static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
}
else
{
- thread->prev= last->next->prev;
- last->next->prev= &thread->next;
- thread->next= last->next;
- last->next= thread;
+ DBUG_ASSERT(last->next->prev == &last->next);
+ /* Add backlink to previous element */
+ thread->prev= last->next->prev;
+    /* Fix first in list to point backwards to current */
+ last->next->prev= &thread->next;
+ /* Next should point to the first element in list */
+ thread->next= last->next;
+ /* Fix old element to point to new one */
+ last->next= thread;
}
wqueue->last_thread= thread;
}
@@ -1057,17 +1062,22 @@ static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
*/
static void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
- struct st_my_thread_var *thread)
+ struct st_my_thread_var *thread)
{
KEYCACHE_DBUG_PRINT("unlink_from_queue", ("thread %ld", thread->id));
DBUG_ASSERT(thread->next && thread->prev);
+
if (thread->next == thread)
+ {
/* The queue contains only one member */
wqueue->last_thread= NULL;
+ }
else
{
+ /* Remove current element from list */
thread->next->prev= thread->prev;
- *thread->prev=thread->next;
+ *thread->prev= thread->next;
+ /* If first element, change list pointer to point to previous element */
if (wqueue->last_thread == thread)
wqueue->last_thread= STRUCT_PTR(struct st_my_thread_var, next,
thread->prev);
@@ -1111,10 +1121,10 @@ static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
{
struct st_my_thread_var *last;
struct st_my_thread_var *thread= my_thread_var;
-
- /* Add to queue. */
DBUG_ASSERT(!thread->next);
DBUG_ASSERT(!thread->prev); /* Not required, but must be true anyway. */
+
+ /* Add to queue. */
if (! (last= wqueue->last_thread))
thread->next= thread;
else
@@ -1125,7 +1135,7 @@ static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
wqueue->last_thread= thread;
/*
- Wait until thread is removed from queue by the signalling thread.
+ Wait until thread is removed from queue by the signaling thread.
The loop protects against stray signals.
*/
do
@@ -1163,10 +1173,11 @@ static void release_whole_queue(KEYCACHE_WQUEUE *wqueue)
if (!(last= wqueue->last_thread))
return;
- next= last->next;
+ next= last->next; /* First (oldest) element */
do
{
thread=next;
+ DBUG_ASSERT(thread);
KEYCACHE_DBUG_PRINT("release_whole_queue: signal",
("thread %ld", thread->id));
/* Signal the thread. */
@@ -1359,7 +1370,7 @@ static void link_block(SIMPLE_KEY_CACHE_CB *keycache, BLOCK_LINK *block,
keycache->waiting_for_block.last_thread;
struct st_my_thread_var *first_thread= last_thread->next;
struct st_my_thread_var *next_thread= first_thread;
- HASH_LINK *hash_link= (HASH_LINK *) first_thread->opt_info;
+ HASH_LINK *hash_link= (HASH_LINK *) first_thread->keycache_link;
struct st_my_thread_var *thread;
do
{
@@ -1369,7 +1380,7 @@ static void link_block(SIMPLE_KEY_CACHE_CB *keycache, BLOCK_LINK *block,
We notify about the event all threads that ask
for the same page as the first thread in the queue
*/
- if ((HASH_LINK *) thread->opt_info == hash_link)
+ if ((HASH_LINK *) thread->keycache_link == hash_link)
{
KEYCACHE_DBUG_PRINT("link_block: signal", ("thread %ld", thread->id));
keycache_pthread_cond_signal(&thread->suspend);
@@ -1703,7 +1714,7 @@ static void unlink_hash(SIMPLE_KEY_CACHE_CB *keycache, HASH_LINK *hash_link)
keycache->waiting_for_hash_link.last_thread;
struct st_my_thread_var *first_thread= last_thread->next;
struct st_my_thread_var *next_thread= first_thread;
- KEYCACHE_PAGE *first_page= (KEYCACHE_PAGE *) (first_thread->opt_info);
+ KEYCACHE_PAGE *first_page= (KEYCACHE_PAGE *) (first_thread->keycache_link);
struct st_my_thread_var *thread;
hash_link->file= first_page->file;
@@ -1712,7 +1723,7 @@ static void unlink_hash(SIMPLE_KEY_CACHE_CB *keycache, HASH_LINK *hash_link)
{
KEYCACHE_PAGE *page;
thread= next_thread;
- page= (KEYCACHE_PAGE *) thread->opt_info;
+ page= (KEYCACHE_PAGE *) thread->keycache_link;
next_thread= thread->next;
/*
We notify about the event all threads that ask
@@ -1801,13 +1812,13 @@ restart:
KEYCACHE_DBUG_PRINT("get_hash_link", ("waiting"));
page.file= file;
page.filepos= filepos;
- thread->opt_info= (void *) &page;
+ thread->keycache_link= (void *) &page;
link_into_queue(&keycache->waiting_for_hash_link, thread);
KEYCACHE_DBUG_PRINT("get_hash_link: wait",
("suspend thread %ld", thread->id));
keycache_pthread_cond_wait(&thread->suspend,
&keycache->cache_lock);
- thread->opt_info= NULL;
+ thread->keycache_link= NULL;
goto restart;
}
hash_link->file= file;
@@ -1965,7 +1976,7 @@ restart:
for another file/pos.
*/
thread= my_thread_var;
- thread->opt_info= (void *) hash_link;
+ thread->keycache_link= (void *) hash_link;
link_into_queue(&keycache->waiting_for_block, thread);
do
{
@@ -1974,7 +1985,7 @@ restart:
keycache_pthread_cond_wait(&thread->suspend,
&keycache->cache_lock);
} while (thread->next);
- thread->opt_info= NULL;
+ thread->keycache_link= NULL;
/*
A block should now be assigned to the hash_link. But it may
still need to be evicted. Anyway, we should re-check the
@@ -2312,7 +2323,7 @@ restart:
*/
struct st_my_thread_var *thread= my_thread_var;
- thread->opt_info= (void *) hash_link;
+ thread->keycache_link= (void *) hash_link;
link_into_queue(&keycache->waiting_for_block, thread);
do
{
@@ -2322,7 +2333,7 @@ restart:
&keycache->cache_lock);
}
while (thread->next);
- thread->opt_info= NULL;
+ thread->keycache_link= NULL;
/* Assert that block has a request registered. */
DBUG_ASSERT(hash_link->block->requests);
/* Assert that block is not in LRU ring. */
@@ -4577,7 +4588,7 @@ static void keycache_dump(SIMPLE_KEY_CACHE_CB *keycache)
do
{
thread=thread->next;
- page= (KEYCACHE_PAGE *) thread->opt_info;
+ page= (KEYCACHE_PAGE *) thread->keycache_link;
fprintf(keycache_dump_file,
"thread:%u, (file,filepos)=(%u,%lu)\n",
thread->id,(uint) page->file,(ulong) page->filepos);
@@ -4593,7 +4604,7 @@ static void keycache_dump(SIMPLE_KEY_CACHE_CB *keycache)
do
{
thread=thread->next;
- hash_link= (HASH_LINK *) thread->opt_info;
+ hash_link= (HASH_LINK *) thread->keycache_link;
fprintf(keycache_dump_file,
"thread:%u hash_link:%u (file,filepos)=(%u,%lu)\n",
thread->id, (uint) HASH_LINK_NUMBER(hash_link),
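
The keycache hunks above rely on the usual wait-queue idiom: a waiting thread links itself into the queue, then loops on a condition variable until the signaling thread has unlinked it, the loop protecting against stray wakeups. A minimal sketch of that pattern with plain pthreads (one shared condition variable and invented names, not the keycache API, which keeps a condition per thread):

#include <pthread.h>

struct waiter { struct waiter *next; int linked; };

static pthread_mutex_t queue_lock= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  queue_cond= PTHREAD_COND_INITIALIZER;
static struct waiter  *queue;

void wait_on_queue_sketch(struct waiter *self)
{
  pthread_mutex_lock(&queue_lock);
  self->next= queue;                     /* link self into the queue */
  self->linked= 1;
  queue= self;
  while (self->linked)                   /* loop protects against stray signals */
    pthread_cond_wait(&queue_cond, &queue_lock);
  pthread_mutex_unlock(&queue_lock);
}

void release_whole_queue_sketch(void)
{
  pthread_mutex_lock(&queue_lock);
  for (struct waiter *w= queue; w; w= w->next)
    w->linked= 0;                        /* mark removed before waking */
  queue= NULL;
  pthread_cond_broadcast(&queue_cond);
  pthread_mutex_unlock(&queue_lock);
}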
diff --git a/mysys/my_context.c b/mysys/my_context.c
index 4d9f1a1a12f..60c0014b3b9 100644
--- a/mysys/my_context.c
+++ b/mysys/my_context.c
@@ -727,33 +727,36 @@ my_context_continue(struct my_context *c)
#ifdef MY_CONTEXT_DISABLE
int
-my_context_continue(struct my_context *c)
+my_context_continue(struct my_context *c __attribute__((unused)))
{
return -1;
}
int
-my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
+my_context_spawn(struct my_context *c __attribute__((unused)),
+ void (*f)(void *) __attribute__((unused)),
+ void *d __attribute__((unused)))
{
return -1;
}
int
-my_context_yield(struct my_context *c)
+my_context_yield(struct my_context *c __attribute__((unused)))
{
return -1;
}
int
-my_context_init(struct my_context *c, size_t stack_size)
+my_context_init(struct my_context *c __attribute__((unused)),
+ size_t stack_size __attribute__((unused)))
{
return -1; /* Out of memory */
}
void
-my_context_destroy(struct my_context *c)
+my_context_destroy(struct my_context *c __attribute__((unused)))
{
}
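
The MY_CONTEXT_DISABLE stubs above do nothing with their parameters; tagging each one with __attribute__((unused)) only silences -Wunused-parameter under the stricter maintainer flags. The same shape reduced to one hypothetical stub (GCC/Clang syntax; other compilers need a different spelling):

#include <stddef.h>

/* Stub kept so callers still link when the feature is compiled out;
   the parameters are deliberately ignored. */
int my_feature_init(void *ctx __attribute__((unused)),
                    size_t stack_size __attribute__((unused)))
{
  return -1;                  /* feature disabled at build time */
}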
diff --git a/mysys/my_wincond.c b/mysys/my_wincond.c
index 6674a5d394d..c761064dd96 100644
--- a/mysys/my_wincond.c
+++ b/mysys/my_wincond.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -328,26 +328,4 @@ int pthread_attr_destroy(pthread_attr_t *connect_att)
return 0;
}
-/****************************************************************************
-** Fix localtime_r() to be a bit safer
-****************************************************************************/
-
-struct tm *localtime_r(const time_t *timep,struct tm *tmp)
-{
- if (*timep == (time_t) -1) /* This will crash win32 */
- {
- bzero(tmp,sizeof(*tmp));
- }
- else
- {
- struct tm *res=localtime(timep);
- if (!res) /* Wrong date */
- {
- bzero(tmp,sizeof(*tmp)); /* Keep things safe */
- return 0;
- }
- *tmp= *res;
- }
- return tmp;
-}
#endif /* __WIN__ */
diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c
index 59c08240bca..cd061db76c7 100644
--- a/mysys/thr_lock.c
+++ b/mysys/thr_lock.c
@@ -343,7 +343,7 @@ static void check_locks(THR_LOCK *lock, const char *where,
found_errors++;
fprintf(stderr,
"Warning at '%s': Write lock %d waiting while no exclusive read locks\n",where,(int) lock->write_wait.data->type);
- DBUG_PRINT("warning", ("Warning at '%s': Write lock %d waiting while no exclusive read locks\n",where,(int) lock->write_wait.data->type));
+ DBUG_PRINT("warning", ("Warning at '%s': Write lock %d waiting while no exclusive read locks",where,(int) lock->write_wait.data->type));
}
}
}
@@ -363,7 +363,7 @@ static void check_locks(THR_LOCK *lock, const char *where,
fprintf(stderr,
"Warning at '%s': Found TL_WRITE_CONCURRENT_INSERT lock mixed with other write lock: %d\n",
where, data->type);
- DBUG_PRINT("warning", ("Warning at '%s': Found TL_WRITE_CONCURRENT_INSERT lock mixed with other write lock: %d\n",
+ DBUG_PRINT("warning", ("Warning at '%s': Found TL_WRITE_CONCURRENT_INSERT lock mixed with other write lock: %d",
where, data->type));
break;
}
@@ -379,7 +379,7 @@ static void check_locks(THR_LOCK *lock, const char *where,
fprintf(stderr,
"Warning at '%s': Found WRITE_ALLOW_WRITE lock waiting for WRITE_ALLOW_WRITE lock\n",
where);
- DBUG_PRINT("warning", ("Warning at '%s': Found WRITE_ALLOW_WRITE lock waiting for WRITE_ALLOW_WRITE lock\n",
+ DBUG_PRINT("warning", ("Warning at '%s': Found WRITE_ALLOW_WRITE lock waiting for WRITE_ALLOW_WRITE lock",
where));
}
@@ -402,7 +402,7 @@ static void check_locks(THR_LOCK *lock, const char *where,
"Warning at '%s' for lock: %d: Found lock of type %d that is write and read locked. Read_no_write_count: %d\n",
where, (int) type, lock->write.data->type,
lock->read_no_write_count);
- DBUG_PRINT("warning",("At '%s' for lock %d: Found lock of type %d that is write and read locked\n",
+ DBUG_PRINT("warning",("At '%s' for lock %d: Found lock of type %d that is write and read locked",
where, (int) type,
lock->write.data->type));
}
@@ -959,7 +959,8 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout)
The idea is to allow us to get a lock at once if we already have
a write lock or if there is no pending write locks and if all
write locks are of the same type and are either
- TL_WRITE_ALLOW_WRITE or TL_WRITE_CONCURRENT_INSERT
+ TL_WRITE_ALLOW_WRITE or TL_WRITE_CONCURRENT_INSERT and
+ there is no TL_READ_NO_INSERT lock.
Note that, since lock requests for the same table are sorted in
such way that requests with higher thr_lock_type value come first
@@ -976,7 +977,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout)
situation.
**) The exceptions are situations when:
- when old lock type is TL_WRITE_DELAYED
- But these should never happen within MySQL.
+ But these should never happen within MariaDB.
Therefore it is OK to allow acquiring write lock on the table if
this thread already holds some write lock on it.
@@ -992,9 +993,11 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout)
if (((lock_type == TL_WRITE_ALLOW_WRITE ||
(lock_type == TL_WRITE_CONCURRENT_INSERT &&
- lock->allow_multiple_concurrent_insert)) &&
+ lock->allow_multiple_concurrent_insert &&
+ !lock->read_no_write_count)) &&
! lock->write_wait.data &&
- lock->write.data->type == lock_type) ||
+ lock->write.data->type == lock_type &&
+ ! lock->read_no_write_count) ||
has_old_lock(lock->write.data, data->owner))
{
DBUG_PRINT("info", ("write_wait.data: 0x%lx old_type: %d",
diff --git a/packaging/rpm-oel/mysql-systemd-start b/packaging/rpm-oel/mysql-systemd-start
index 9cb2a25c990..8670f889574 100644
--- a/packaging/rpm-oel/mysql-systemd-start
+++ b/packaging/rpm-oel/mysql-systemd-start
@@ -8,10 +8,19 @@
# post mode : ping server until answer is received
#
+get_option () {
+ local section=$1
+ local option=$2
+ local default=$3
+ ret=$(/usr/bin/my_print_defaults $section | grep '^--'${option}'=' | cut -d= -f2-)
+ [ -z $ret ] && ret=$default
+ echo $ret
+}
+
install_db () {
# Note: something different than datadir=/var/lib/mysql requires SELinux policy changes (in enforcing mode)
- datadir=$(/usr/bin/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p')
-
+ datadir=$(get_option mysqld datadir "/var/lib/mysql")
+
# Restore log, dir, perms and SELinux contexts
[ -d "$datadir" ] || install -d -m 0755 -omysql -gmysql "$datadir" || exit 1
log=/var/log/mysqld.log
@@ -35,9 +44,16 @@ pinger () {
# Wait for ping to answer to signal startup completed,
# might take a while in case of e.g. crash recovery
# MySQL systemd service will timeout script if no answer
+ datadir=$(get_option mysqld datadir "/var/lib/mysql")
+ socket=$(get_option mysqld socket "$datadir/mysql.sock")
+ case $socket in
+ /*) adminsocket="$socket" ;;
+ *) adminsocket="$datadir/$socket" ;;
+ esac
+
while /bin/true ; do
sleep 1
- mysqladmin ping >/dev/null 2>&1 && break
+ mysqladmin --no-defaults --socket="$adminsocket" --user=UNKNOWN_MYSQL_USER ping >/dev/null 2>&1 && break
done
exit 0
}
diff --git a/packaging/rpm-oel/mysql.init b/packaging/rpm-oel/mysql.init
index d6f8f023850..79c8a8daa7d 100644
--- a/packaging/rpm-oel/mysql.init
+++ b/packaging/rpm-oel/mysql.init
@@ -50,11 +50,15 @@ errlogfile="$result"
get_mysql_option mysqld_safe pid-file "/var/run/mysqld/mysqld.pid"
mypidfile="$result"
+case $socketfile in
+ /*) adminsocket="$socketfile" ;;
+ *) adminsocket="$datadir/$socketfile" ;;
+esac
start(){
[ -x $exec ] || exit 5
# check to see if it's already running
- RESPONSE=`/usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping 2>&1`
+ RESPONSE=$(/usr/bin/mysqladmin --no-defaults --socket="$adminsocket" --user=UNKNOWN_MYSQL_USER ping 2>&1)
if [ $? = 0 ]; then
# already running, do nothing
action $"Starting $prog: " /bin/true
@@ -107,7 +111,7 @@ start(){
ret=0
TIMEOUT="$STARTTIMEOUT"
while [ $TIMEOUT -gt 0 ]; do
- RESPONSE=`/usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping 2>&1` && break
+ RESPONSE=$(/usr/bin/mysqladmin --no-defaults --socket="$adminsocket" --user=UNKNOWN_MYSQL_USER ping 2>&1) && break
echo "$RESPONSE" | grep -q "Access denied for user" && break
if ! /bin/kill -0 $safe_pid 2>/dev/null; then
echo "MySQL Daemon failed to start."
diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
index fe98751d009..e67327531d9 100644
--- a/scripts/CMakeLists.txt
+++ b/scripts/CMakeLists.txt
@@ -90,17 +90,10 @@ IF(MALLOC_LIB)
ENDIF()
IF(CMAKE_GENERATOR MATCHES "Makefiles")
- # Strip maintainer mode options if necessary
- STRING(REPLACE "${MY_MAINTAINER_C_WARNINGS}" "" CFLAGS "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_RELWITHDEBINFO}")
- STRING(REPLACE "${MY_MAINTAINER_CXX_WARNINGS}" "" CXXFLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}")
FOREACH(ARCH ${CMAKE_OSX_ARCHITECTURES})
SET(CFLAGS "${CFLAGS} -arch ${ARCH}")
SET(CXXFLAGS "${CXXFLAGS} -arch ${ARCH}")
ENDFOREACH()
-ELSE()
- # Strip maintainer mode options if necessary
- STRING(REPLACE "${MY_MAINTAINER_C_WARNINGS}" "" CFLAGS "${CMAKE_C_FLAGS_RELWITHDEBINFO}")
- STRING(REPLACE "${MY_MAINTAINER_CXX_WARNINGS}" "" CXXFLAGS "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}")
ENDIF()
IF(UNIX)
diff --git a/scripts/mysql_setpermission.sh b/scripts/mysql_setpermission.sh
index f23011a5ae6..48f0b09b566 100644
--- a/scripts/mysql_setpermission.sh
+++ b/scripts/mysql_setpermission.sh
@@ -264,13 +264,13 @@ sub addall {
foreach $host (@hosts) {
# user privileges: SELECT
if (($todo == 2) || ($todo == 3)) {
- $sth = $dbh->do("GRANT SELECT ON $db.* TO $user@\"$host\" IDENTIFIED BY \'$pass\'") || die $dbh->errstr;
+ $sth = $dbh->do("GRANT SELECT ON $db.* TO \'$user\'@\'$host\' IDENTIFIED BY \'$pass\'") || die $dbh->errstr;
} elsif ($todo == 4) {
# user privileges: SELECT,INSERT,UPDATE,DELETE
- $sth = $dbh->do("GRANT SELECT,INSERT,UPDATE,DELETE ON $db.* TO $user@\"$host\" IDENTIFIED BY \'$pass\'") || die $dbh->errstr;
+ $sth = $dbh->do("GRANT SELECT,INSERT,UPDATE,DELETE ON $db.* TO \'$user\'@\'$host\' IDENTIFIED BY \'$pass\'") || die $dbh->errstr;
} elsif ($todo == 5) {
# user privileges: SELECT,INSERT,UPDATE,DELETE,CREATE,DROP,INDEX,LOCK TABLES,CREATE TEMPORARY TABLES
- $sth = $dbh->do("GRANT SELECT,INSERT,UPDATE,DELETE,CREATE,DROP,INDEX,LOCK TABLES,CREATE TEMPORARY TABLES ON $db.* TO $user@\"$host\" IDENTIFIED BY \'$pass\'") || die $dbh->errstr;
+ $sth = $dbh->do("GRANT SELECT,INSERT,UPDATE,DELETE,CREATE,DROP,INDEX,LOCK TABLES,CREATE TEMPORARY TABLES ON $db.* TO \'$user\'@\'$host\' IDENTIFIED BY \'$pass\'") || die $dbh->errstr;
} elsif ($todo == 6) {
# all privileges
$sth = $dbh->do("GRANT ALL ON $db.* TO \'$user\'\@\'$host\' IDENTIFIED BY \'$pass\'") || die $dbh->errstr;
diff --git a/scripts/mysql_system_tables.sql b/scripts/mysql_system_tables.sql
index a53a73656e7..bc89ae82dbb 100644
--- a/scripts/mysql_system_tables.sql
+++ b/scripts/mysql_system_tables.sql
@@ -80,7 +80,7 @@ CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsign
CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
-CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(141) collate utf8_bin DEFAULT '' NOT NULL, created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db,name,type)) engine=MyISAM character set utf8 comment='Stored Procedures';
+CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob NOT NULL, body longblob NOT NULL, definer char(141) collate utf8_bin DEFAULT '' NOT NULL, created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db,name,type)) engine=MyISAM character set utf8 comment='Stored Procedures';
CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Routine_name char(64) COLLATE utf8_general_ci DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(141) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh
index dc831434dbe..3e6f6fc0e02 100644
--- a/scripts/mysqld_safe.sh
+++ b/scripts/mysqld_safe.sh
@@ -670,6 +670,7 @@ then
then
# User explicitly asked for syslog, so warn that it isn't used
log_error "Can't log to error log and syslog at the same time. Remove all --log-error configuration options for --syslog to take effect."
+ want_syslog=0
fi
# Log to err_log file
diff --git a/sql-bench/test-table-elimination.sh b/sql-bench/test-table-elimination.sh
index 5b494688bec..7772cab42b0 100755
--- a/sql-bench/test-table-elimination.sh
+++ b/sql-bench/test-table-elimination.sh
@@ -1,4 +1,4 @@
-#!@PERL@
+#!/usr/bin/perl
# Test of table elimination feature
use Cwd;
diff --git a/sql-common/client.c b/sql-common/client.c
index 89f6a8434a5..20ccd73596c 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -113,7 +113,7 @@ my_bool net_flush(NET *net);
#define native_password_plugin_name "mysql_native_password"
#define old_password_plugin_name "mysql_old_password"
-
+uint mariadb_deinitialize_ssl= 1;
uint mysql_port=0;
char *mysql_unix_port= 0;
const char *unknown_sqlstate= "HY000";
diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc
index 2980ecd7dbf..5802d726aa2 100644
--- a/sql/debug_sync.cc
+++ b/sql/debug_sync.cc
@@ -1394,8 +1394,9 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action)
if (action->wait_for.length())
{
- mysql_mutex_t *old_mutex;
+ mysql_mutex_t *old_mutex= NULL;
mysql_cond_t *old_cond= NULL;
+ bool restore_current_mutex;
int error= 0;
struct timespec abstime;
@@ -1412,11 +1413,12 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action)
{
old_mutex= thd->mysys_var->current_mutex;
old_cond= thd->mysys_var->current_cond;
+ restore_current_mutex = true;
thd->mysys_var->current_mutex= &debug_sync_global.ds_mutex;
thd->mysys_var->current_cond= &debug_sync_global.ds_cond;
}
else
- old_mutex= NULL;
+ restore_current_mutex = false;
set_timespec(abstime, action->timeout);
DBUG_EXECUTE("debug_sync_exec", {
@@ -1476,7 +1478,7 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action)
is locked. (See comment in THD::exit_cond().)
*/
mysql_mutex_unlock(&debug_sync_global.ds_mutex);
- if (old_mutex)
+ if (restore_current_mutex)
{
mysql_mutex_lock(&thd->mysys_var->mutex);
thd->mysys_var->current_mutex= old_mutex;
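
The debug_sync change stops using "old_mutex == NULL" as the sign that nothing was saved and tracks that fact in a separate restore_current_mutex flag, so a saved pointer that is legitimately NULL can no longer be mistaken for "no save was done". The save/restore shape in isolation (types and names are illustrative only):

struct holder { int *current; };

static void run_with_temporary(struct holder *h, int *tmp)
{
  int *saved= (int *) 0;
  int  must_restore= 0;            /* explicit flag instead of a sentinel */

  if (h)                           /* something to save and restore */
  {
    saved= h->current;             /* may itself be NULL, and that is fine */
    must_restore= 1;
    h->current= tmp;
  }

  /* ... wait or work with the temporary value installed ... */

  if (must_restore)                /* correct even when saved == NULL */
    h->current= saved;
}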
diff --git a/sql/field.cc b/sql/field.cc
index ad59b4a63bd..e7e046a8458 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -4899,7 +4899,7 @@ void Field_timestamp::set_explicit_default(Item *value)
{
if (((value->type() == Item::DEFAULT_VALUE_ITEM &&
!((Item_default_value*)value)->arg) ||
- (!maybe_null() && value->is_null())))
+ (!maybe_null() && value->null_value)))
return;
set_has_explicit_value();
}
diff --git a/sql/field.h b/sql/field.h
index dd603d41bf7..0390e95f954 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -656,21 +656,28 @@ public:
inline bool is_null(my_ptrdiff_t row_offset= 0) const
{
/*
+ If the field is NULLable, its NULLity is read from the
+ null_ptr[row_offset] value. Otherwise the NULL flag depends on
+ the TABLE::null_row value.
+
The table may have been marked as containing only NULL values
for all fields if it is a NULL-complemented row of an OUTER JOIN
or if the query is an implicitly grouped query (has aggregate
functions but no GROUP BY clause) with no qualifying rows. If
- this is the case (in which TABLE::null_row is true), the field
- is considered to be NULL.
+ this is the case (in which TABLE::null_row is true) and the
+ field is not nullable, the field is considered to be NULL.
+
+ Do not change the order of testing. Fields may be associated
+ with a TABLE object without being part of the current row.
+ For NULL value check to work for these fields, they must
+ have a valid null_ptr, and this pointer must be checked before
+ TABLE::null_row.
+
Note that if a table->null_row is set then also all null_bits are
set for the row.
-
- Otherwise, if the field is NULLable, it has a valid null_ptr
- pointer, and its NULLity is recorded in the "null_bit" bit of
- null_ptr[row_offset].
*/
- return (table->null_row ? TRUE :
- null_ptr ? MY_TEST(null_ptr[row_offset] & null_bit) : 0);
+ return real_maybe_null() ?
+ MY_TEST(null_ptr[row_offset] & null_bit) : table->null_row;
}
inline bool is_real_null(my_ptrdiff_t row_offset= 0) const
{ return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : 0; }
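
The rewritten Field::is_null() tests the field's own NULL bit first and only falls back to the whole-row flag for NOT NULL columns, because a Field may be attached to a TABLE without being part of the current row. Stripped down to plain C (structure names invented for the sketch):

struct table_sketch { int null_row; };

struct field_sketch
{
  struct table_sketch *table;
  const unsigned char *null_ptr;    /* NULL when the column is NOT NULL */
  unsigned char        null_bit;
};

static int field_is_null(const struct field_sketch *f, long row_offset)
{
  /* Order matters: a nullable field is judged by its own bit, and only
     non-nullable fields fall back to the per-row TABLE::null_row flag. */
  if (f->null_ptr)
    return (f->null_ptr[row_offset] & f->null_bit) != 0;
  return f->table->null_row;
}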
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 509a7f8e9b3..027437fca67 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -166,8 +166,6 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
TABLE_LIST *tab= table->pos_in_table_list;
Item_subselect *subselect= tab ? tab->containing_subselect() : 0;
- *found_rows= HA_POS_ERROR;
-
MYSQL_FILESORT_START(table->s->db.str, table->s->table_name.str);
DEBUG_SYNC(thd, "filesort_start");
@@ -190,6 +188,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
my_b_clear(&buffpek_pointers);
buffpek=0;
error= 1;
+ *found_rows= HA_POS_ERROR;
param.init_for_filesort(sortlength(thd, sortorder, s_length,
&multi_byte_charset),
@@ -690,8 +689,7 @@ static ha_rows find_all_keys(Sort_param *param, SQL_SELECT *select,
ref_pos= ref_buff;
quick_select=select && select->quick;
record=0;
- if (pq) // don't count unless pq is used
- *found_rows= 0;
+ *found_rows= 0;
flag= ((file->ha_table_flags() & HA_REC_NOT_IN_SEQ) || quick_select);
if (flag)
ref_pos= &file->ref[0];
@@ -814,14 +812,9 @@ static ha_rows find_all_keys(Sort_param *param, SQL_SELECT *select,
if (write_record)
{
+ ++(*found_rows);
if (pq)
{
- /*
- only count rows when pq is used - otherwise there might be
- other filters *after* the filesort, we don't know the final row
- count here
- */
- (*found_rows)++;
pq->push(ref_pos);
idx= pq->num_elements();
}
diff --git a/sql/handler.cc b/sql/handler.cc
index 85129f11c63..234c9408b74 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -86,9 +86,7 @@ static const LEX_STRING sys_table_aliases[]=
};
const char *ha_row_type[] = {
- "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT",
- "PAGE",
- "?","?","?"
+ "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", "PAGE"
};
const char *tx_isolation_names[] =
diff --git a/sql/handler.h b/sql/handler.h
index d5a371027f6..3ad88f8b450 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -453,8 +453,10 @@ enum legacy_db_type
enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED,
ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
- ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT,
- ROW_TYPE_PAGE };
+ ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, ROW_TYPE_PAGE };
+
+/* not part of the enum, so that it shouldn't be in switch(row_type) */
+#define ROW_TYPE_MAX ((uint)ROW_TYPE_PAGE + 1)
/* Specifies data storage format for individual columns */
enum column_format_type {
@@ -1397,6 +1399,9 @@ static inline sys_var *find_hton_sysvar(handlerton *hton, st_mysql_sys_var *var)
#define HTON_NO_BINLOG_ROW_OPT (1 << 9)
#define HTON_SUPPORTS_EXTENDED_KEYS (1 <<10) //supports extended keys
+// MySQL compatibility. Unused.
+#define HTON_SUPPORTS_FOREIGN_KEYS (1 << 0) //Foreign key constraint supported.
+
class Ha_trx_info;
struct THD_TRANS
@@ -1578,7 +1583,7 @@ class partition_info;
struct st_partition_iter;
-enum ha_choice { HA_CHOICE_UNDEF, HA_CHOICE_NO, HA_CHOICE_YES };
+enum ha_choice { HA_CHOICE_UNDEF, HA_CHOICE_NO, HA_CHOICE_YES, HA_CHOICE_MAX };
enum enum_stats_auto_recalc { HA_STATS_AUTO_RECALC_DEFAULT= 0,
HA_STATS_AUTO_RECALC_ON,
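
ROW_TYPE_MAX is deliberately a macro outside the enum: switch (row_type) statements keep their exhaustiveness warnings, while code that reads a row_type byte from an untrusted .frm can still range-check it before using it, e.g. before indexing ha_row_type[]. A hedged sketch of that check (the validation function is invented, not the server's frm reader):

enum row_type_sketch { ROW_SKETCH_NOT_USED= -1, ROW_SKETCH_DEFAULT,
                       ROW_SKETCH_FIXED, ROW_SKETCH_DYNAMIC,
                       ROW_SKETCH_COMPRESSED, ROW_SKETCH_REDUNDANT,
                       ROW_SKETCH_COMPACT, ROW_SKETCH_PAGE };

/* Not part of the enum, so switch() exhaustiveness checks stay useful. */
#define ROW_SKETCH_MAX ((unsigned) ROW_SKETCH_PAGE + 1)

static enum row_type_sketch check_row_type(unsigned raw_from_frm)
{
  if (raw_from_frm >= ROW_SKETCH_MAX)      /* corrupt or future value */
    return ROW_SKETCH_DEFAULT;             /* fall back instead of reading
                                              past a name table */
  return (enum row_type_sketch) raw_from_frm;
}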
diff --git a/sql/item.cc b/sql/item.cc
index 1dd4fc2909f..132cfa2846a 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -885,20 +885,20 @@ Item_ident::Item_ident(THD *thd, Item_ident *item)
void Item_ident::cleanup()
{
DBUG_ENTER("Item_ident::cleanup");
-#ifdef CANT_BE_USED_AS_MEMORY_IS_FREED
- db_name ? db_name : "(null)",
- orig_db_name ? orig_db_name : "(null)",
- table_name ? table_name : "(null)",
- orig_table_name ? orig_table_name : "(null)",
- field_name ? field_name : "(null)",
- orig_field_name ? orig_field_name : "(null)"));
-#endif
+ bool was_fixed= fixed;
Item::cleanup();
db_name= orig_db_name;
table_name= orig_table_name;
field_name= orig_field_name;
/* Store if this Item was depended */
- can_be_depended= MY_TEST(depended_from);
+ if (was_fixed)
+ {
+ /*
+ We can trust that depended_from set correctly only if this item
+ was fixed
+ */
+ can_be_depended= MY_TEST(depended_from);
+ }
DBUG_VOID_RETURN;
}
diff --git a/sql/item.h b/sql/item.h
index f337db92ef3..13e80639657 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -4458,7 +4458,7 @@ private:
/**
@todo
Implement the is_null() method for this class. Currently calling is_null()
- on any Item_cache object resolves to Item::is_null(), which reutns FALSE
+ on any Item_cache object resolves to Item::is_null(), which returns FALSE
for any value.
*/
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 62f63501d86..1f1982ffb80 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -621,17 +621,6 @@ int Arg_comparator::set_compare_func(Item_result_field *item, Item_result type)
}
case STRING_RESULT:
{
- /*
- We must set cmp_charset here as we may be called from for an automatic
- generated item, like in natural join
- */
- if (cmp_collation.set((*a)->collation, (*b)->collation) ||
- cmp_collation.derivation == DERIVATION_NONE)
- {
- my_coll_agg_error((*a)->collation, (*b)->collation,
- owner->func_name());
- return 1;
- }
if (cmp_collation.collation == &my_charset_bin)
{
/*
@@ -755,6 +744,37 @@ bool get_mysql_time_from_str(THD *thd, String *str, timestamp_type warn_type,
/**
+ Aggregate comparator argument charsets for comparison.
+ One of the arguments ("a" or "b") can be replaced,
+ typically by Item_string or Item_func_conv_charset.
+
+ @return Aggregation result
+ @retval false - if no conversion is needed,
+ or if one of the arguments was converted
+ @retval true - on error, if arguments are not comparable.
+
+ TODO: get rid of this method eventually and refactor the calling code.
+ Argument conversion should happen on the Item_func level.
+ Arg_comparator should get comparable arguments.
+*/
+bool Arg_comparator::agg_arg_charsets_for_comparison()
+{
+ if (cmp_collation.set((*a)->collation, (*b)->collation, MY_COLL_CMP_CONV) ||
+ cmp_collation.derivation == DERIVATION_NONE)
+ {
+ my_coll_agg_error((*a)->collation, (*b)->collation, owner->func_name());
+ return true;
+ }
+ if (agg_item_set_converter(cmp_collation, owner->func_name(),
+ a, 1, MY_COLL_CMP_CONV, 1) ||
+ agg_item_set_converter(cmp_collation, owner->func_name(),
+ b, 1, MY_COLL_CMP_CONV, 1))
+ return true;
+ return false;
+}
+
+
+/**
Prepare the comparator (set the comparison function) for comparing
items *a1 and *a2 in the context of 'type'.
@@ -781,10 +801,11 @@ int Arg_comparator::set_cmp_func(Item_result_field *owner_arg,
(*a)->result_type() == STRING_RESULT &&
(*b)->result_type() == STRING_RESULT)
{
- DTCollation coll;
- coll.set((*a)->collation.collation);
- if (agg_item_set_converter(coll, owner->func_name(),
- b, 1, MY_COLL_CMP_CONV, 1))
+ /*
+ We must set cmp_collation here as we may be called for an automatically
+ generated item, like in natural join
+ */
+ if (agg_arg_charsets_for_comparison())
return 1;
}
if (type == INT_RESULT &&
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index d4a1c6b1384..5d11057228c 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -48,6 +48,14 @@ class Arg_comparator: public Sql_alloc
THD *thd;
Item *a_cache, *b_cache; // Cached values of a and b items
// when one of arguments is NULL.
+ int set_compare_func(Item_result_field *owner, Item_result type);
+ inline int set_compare_func(Item_result_field *owner_arg)
+ {
+ return set_compare_func(owner_arg, item_cmp_type((*a)->result_type(),
+ (*b)->result_type()));
+ }
+ bool agg_arg_charsets_for_comparison();
+
public:
DTCollation cmp_collation;
/* Allow owner function to use string buffers. */
@@ -58,12 +66,6 @@ public:
Arg_comparator(Item **a1, Item **a2): a(a1), b(a2), set_null(TRUE),
comparators(0), thd(0), a_cache(0), b_cache(0) {};
- int set_compare_func(Item_result_field *owner, Item_result type);
- inline int set_compare_func(Item_result_field *owner_arg)
- {
- return set_compare_func(owner_arg, item_cmp_type((*a)->result_type(),
- (*b)->result_type()));
- }
int set_cmp_func(Item_result_field *owner_arg,
Item **a1, Item **a2,
Item_result type);
@@ -122,6 +124,8 @@ public:
Item_bool_func() :Item_int_func() {}
Item_bool_func(Item *a) :Item_int_func(a) {}
Item_bool_func(Item *a,Item *b) :Item_int_func(a,b) {}
+ Item_bool_func(Item *a, Item *b, Item *c) :Item_int_func(a, b, c) {}
+ Item_bool_func(List<Item> &list) :Item_int_func(list) { }
Item_bool_func(THD *thd, Item_bool_func *item) :Item_int_func(thd, item) {}
bool is_bool_func() { return 1; }
void fix_length_and_dec() { decimals=0; max_length=1; }
@@ -364,7 +368,7 @@ public:
virtual bool l_op() const { return 1; }
};
-class Item_bool_func2 :public Item_int_func
+class Item_bool_func2 :public Item_bool_func
{ /* Bool with 2 string args */
protected:
Arg_comparator cmp;
@@ -372,7 +376,7 @@ protected:
public:
Item_bool_func2(Item *a,Item *b)
- :Item_int_func(a,b), cmp(tmp_arg, tmp_arg+1),
+ :Item_bool_func(a,b), cmp(tmp_arg, tmp_arg+1),
abort_on_null(FALSE) { sargable= TRUE; }
void fix_length_and_dec();
int set_cmp_func()
@@ -389,14 +393,12 @@ public:
}
bool is_null() { return MY_TEST(args[0]->is_null() || args[1]->is_null()); }
- bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp.cmp_collation.collation; }
- uint decimal_precision() const { return 1; }
void top_level_item() { abort_on_null= TRUE; }
Arg_comparator *get_comparator() { return &cmp; }
void cleanup()
{
- Item_int_func::cleanup();
+ Item_bool_func::cleanup();
cmp.cleanup();
}
@@ -646,16 +648,16 @@ public:
*/
-class Item_func_opt_neg :public Item_int_func
+class Item_func_opt_neg :public Item_bool_func
{
public:
bool negated; /* <=> the item represents NOT <func> */
bool pred_level; /* <=> [NOT] <func> is used on a predicate level */
public:
Item_func_opt_neg(Item *a, Item *b, Item *c)
- :Item_int_func(a, b, c), negated(0), pred_level(0) {}
+ :Item_bool_func(a, b, c), negated(0), pred_level(0) {}
Item_func_opt_neg(List<Item> &list)
- :Item_int_func(list), negated(0), pred_level(0) {}
+ :Item_bool_func(list), negated(0), pred_level(0) {}
public:
inline void negate() { negated= !negated; }
inline void top_level_item() { pred_level= 1; }
@@ -686,9 +688,7 @@ public:
bool fix_fields(THD *, Item **);
void fix_length_and_dec();
virtual void print(String *str, enum_query_type query_type);
- bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
- uint decimal_precision() const { return 1; }
bool eval_not_null_tables(uchar *opt_arg);
void fix_after_pullout(st_select_lex *new_parent, Item **ref);
bool count_sargable_conds(uchar *arg);
@@ -1316,7 +1316,6 @@ public:
longlong val_int();
bool fix_fields(THD *, Item **);
void fix_length_and_dec();
- uint decimal_precision() const { return 1; }
void cleanup()
{
uint i;
@@ -1337,7 +1336,6 @@ public:
enum Functype functype() const { return IN_FUNC; }
const char *func_name() const { return " IN "; }
bool nulls_in_row();
- bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
bool eval_not_null_tables(uchar *opt_arg);
void fix_after_pullout(st_select_lex *new_parent, Item **ref);
@@ -1490,7 +1488,42 @@ public:
longlong val_int();
enum Functype functype() const { return LIKE_FUNC; }
optimize_type select_optimize() const;
- cond_result eq_cmp_result() const { return COND_TRUE; }
+ cond_result eq_cmp_result() const
+ {
+ /**
+ We cannot always rewrite conditions as follows:
+ from: WHERE expr1=const AND expr1 LIKE expr2
+ to: WHERE expr1=const AND const LIKE expr2
+ or
+ from: WHERE expr1=const AND expr2 LIKE expr1
+ to: WHERE expr1=const AND expr2 LIKE const
+
+ because LIKE works differently from the regular "=" operator:
+
+ 1. LIKE performs a stricter one-character-to-one-character comparison
+ and does not recognize contractions and expansions.
+ Replacing "expr1" to "const in LIKE would make the condition
+ stricter in case of a complex collation.
+
+ 2. LIKE does not ignore trailing spaces and thus works differently
+ from the "=" operator in case of "PAD SPACE" collations
+ (which are the majority in MariaDB). So, for "PAD SPACE" collations:
+
+ - expr1=const - ignores trailing spaces
+ - const LIKE expr2 - does not ignore trailing spaces
+ - expr2 LIKE const - does not ignore trailing spaces
+
+ Allow only "binary" for now.
+ It neither ignores trailing spaces nor has contractions/expansions.
+
+ TODO:
+ We could still replace "expr1" with "const" in "expr1 LIKE expr2"
+ in case of a "PAD SPACE" collation, but only if "expr2" has '%'
+ at the end.
+ */
+ return ((Item_func_like *)this)->compare_collation() == &my_charset_bin ?
+ COND_TRUE : COND_OK;
+ }
const char *func_name() const { return "like"; }
bool fix_fields(THD *thd, Item **ref);
void cleanup();
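
The eq_cmp_result() comment above rests on the fact that "=" ignores trailing spaces under PAD SPACE collations while LIKE compares them literally. That difference can be reproduced without any server code; a small sketch comparing a pad-space-style equality with an exact, LIKE-style match on plain single-byte strings:

#include <stdio.h>
#include <string.h>

/* Equality that ignores trailing spaces, roughly what a PAD SPACE
   collation does for the "=" operator. */
static int eq_pad_space(const char *a, const char *b)
{
  size_t la= strlen(a), lb= strlen(b);
  while (la && a[la - 1] == ' ') la--;
  while (lb && b[lb - 1] == ' ') lb--;
  return la == lb && memcmp(a, b, la) == 0;
}

int main(void)
{
  /* "abc" = "abc  " is true under padding, but the LIKE-style exact
     comparison of the same strings is not, so substituting the constant
     into the LIKE argument would change the result. */
  printf("pad-space equality: %d\n", eq_pad_space("abc", "abc  "));   /* 1 */
  printf("exact (LIKE-like) : %d\n", strcmp("abc", "abc  ") == 0);    /* 0 */
  return 0;
}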
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 1845c6dff98..bd000f6ca62 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -4191,9 +4191,10 @@ void mysql_ull_set_explicit_lock_duration(THD *thd)
When MDL detects a lock wait timeout, it pushes
an error into the statement diagnostics area.
For GET_LOCK(), lock wait timeout is not an error,
- but a special return value (0). NULL is returned in
- case of error.
- Capture and suppress lock wait timeout.
+ but a special return value (0).
+ Similarly, killing the GET_LOCK() wait is not an error either,
+ but yields a NULL return value.
+ Capture and suppress lock wait timeouts and kills.
*/
class Lock_wait_timeout_handler: public Internal_error_handler
@@ -4212,7 +4213,7 @@ public:
bool
Lock_wait_timeout_handler::
-handle_condition(THD * /* thd */, uint sql_errno,
+handle_condition(THD *thd, uint sql_errno,
const char * /* sqlstate */,
Sql_condition::enum_warning_level /* level */,
const char *message,
@@ -4223,6 +4224,9 @@ handle_condition(THD * /* thd */, uint sql_errno,
m_lock_wait_timeout= true;
return true; /* condition handled */
}
+ if (thd->is_killed())
+ return true;
+
return false;
}
@@ -4628,6 +4632,11 @@ longlong Item_func_sleep::val_int()
mysql_cond_destroy(&cond);
+ DBUG_EXECUTE_IF("sleep_inject_query_done_debug_sync", {
+ debug_sync_set_action
+ (thd, STRING_WITH_LEN("dispatch_command_end SIGNAL query_done"));
+ };);
+
return MY_TEST(!error); // Return 1 killed
}
diff --git a/sql/item_func.h b/sql/item_func.h
index e40f2d771c6..ce1f2fdd676 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -1,7 +1,7 @@
#ifndef ITEM_FUNC_INCLUDED
#define ITEM_FUNC_INCLUDED
-/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2009, 2014, SkySQL Ab.
+/* Copyright (c) 2000, 2014, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2014, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -287,7 +287,8 @@ public:
inline longlong check_integer_overflow(longlong value, bool val_unsigned)
{
if ((unsigned_flag && !val_unsigned && value < 0) ||
- (!unsigned_flag && val_unsigned && (ulonglong) value > LONGLONG_MAX))
+ (!unsigned_flag && val_unsigned &&
+ (ulonglong) value > (ulonglong) LONGLONG_MAX))
return raise_integer_overflow();
return value;
}
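
The check_integer_overflow() hunk adds a cast so the comparison is explicitly unsigned against unsigned; the intent, an unsigned source value too large for a signed 64-bit result, no longer depends on the implicit conversion applied to LONGLONG_MAX. The same test in standalone form, using <stdint.h> types instead of the server's longlong/ulonglong:

#include <stdint.h>
#include <stdbool.h>

/* True when an unsigned 64-bit value cannot be represented as a signed
   64-bit result (the "unsigned source, signed target" overflow case). */
static bool unsigned_overflows_signed(uint64_t value)
{
  return value > (uint64_t) INT64_MAX;
}

/* Example: 9223372036854775807ULL fits, 9223372036854775808ULL does not. */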
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 389d9d5380c..fb55b7660cb 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -1564,10 +1564,9 @@ String *Item_temporal_hybrid_func::val_str_ascii(String *str)
bool Item_func_from_days::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
{
longlong value=args[0]->val_int();
- if (args[0]->null_value)
- return (null_value= 1);
- if ((fuzzy_date & TIME_NO_ZERO_DATE) && value == 0)
- return (null_value= 1);
+ if ((null_value= (args[0]->null_value ||
+ ((fuzzy_date & TIME_NO_ZERO_DATE) && value == 0))))
+ return true;
bzero(ltime, sizeof(MYSQL_TIME));
if (get_date_from_daynr((long) value, &ltime->year, &ltime->month,
&ltime->day))
diff --git a/sql/lock.cc b/sql/lock.cc
index 4d7afc697ef..7c8368ab0e3 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -305,15 +305,16 @@ bool mysql_lock_tables(THD *thd, MYSQL_LOCK *sql_lock, uint flags)
int rc= 1;
ulong timeout= (flags & MYSQL_LOCK_IGNORE_TIMEOUT) ?
LONG_TIMEOUT : thd->variables.lock_wait_timeout;
-
+ PSI_stage_info org_stage;
DBUG_ENTER("mysql_lock_tables(sql_lock)");
- THD_STAGE_INFO(thd, stage_system_lock);
+ thd->enter_stage(&stage_system_lock, &org_stage, __func__, __FILE__,
+ __LINE__);
if (sql_lock->table_count && lock_external(thd, sql_lock->table,
sql_lock->table_count))
goto end;
- thd_proc_info(thd, "Table lock");
+ THD_STAGE_INFO(thd, stage_table_lock);
/* Copy the lock data array. thr_multi_lock() reorders its contents. */
memmove(sql_lock->locks + sql_lock->lock_count, sql_lock->locks,
@@ -331,7 +332,7 @@ bool mysql_lock_tables(THD *thd, MYSQL_LOCK *sql_lock, uint flags)
(void) unlock_external(thd, sql_lock->table, sql_lock->table_count);
end:
- THD_STAGE_INFO(thd, stage_after_table_lock);
+ THD_STAGE_INFO(thd, org_stage);
#ifdef WITH_WSREP
thd_proc_info(thd, "mysql_lock_tables(): unlocking tables II");
#else /* WITH_WSREP */
diff --git a/sql/log.cc b/sql/log.cc
index 7a8531b5f46..69e0ed34b01 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -3200,7 +3200,7 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period)
- :reset_master_pending(false), mark_xid_done_waiting(0),
+ :reset_master_pending(0), mark_xid_done_waiting(0),
bytes_written(0), file_id(1), open_count(1),
group_commit_queue(0), group_commit_queue_busy(FALSE),
num_commits(0), num_group_commits(0),
@@ -4036,12 +4036,13 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log,
do this before we take the LOCK_log to not deadlock.
*/
mysql_mutex_lock(&LOCK_xid_list);
- reset_master_pending= true;
+ reset_master_pending++;
while (mark_xid_done_waiting > 0)
mysql_cond_wait(&COND_xid_list, &LOCK_xid_list);
mysql_mutex_unlock(&LOCK_xid_list);
}
+ DEBUG_SYNC(thd, "reset_logs_after_set_reset_master_pending");
if (thd)
ha_reset_logs(thd);
/*
@@ -4223,7 +4224,7 @@ err:
DBUG_ASSERT(b->xid_count == 0);
my_free(binlog_xid_count_list.get());
}
- reset_master_pending= false;
+ reset_master_pending--;
mysql_mutex_unlock(&LOCK_xid_list);
}
diff --git a/sql/log.h b/sql/log.h
index d5aab4ac612..bda6e55e0c9 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -470,7 +470,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
anyway). Instead we should signal COND_xid_list whenever a new binlog
checkpoint arrives - when all have arrived, RESET MASTER will complete.
*/
- bool reset_master_pending;
+ uint reset_master_pending;
ulong mark_xid_done_waiting;
/* LOCK_log and LOCK_index are inited by init_pthread_objects() */
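
Turning reset_master_pending from bool into a counter means overlapping RESET MASTER executions can no longer clear each other's pending state: each one increments on entry and decrements when done, and waiters block until the count returns to zero. The counter pattern in isolation (mutex and condition names are illustrative):

#include <pthread.h>

static pthread_mutex_t pending_lock= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pending_cond= PTHREAD_COND_INITIALIZER;
static unsigned pending;         /* was a bool: a second caller could
                                    reset it behind the first one's back */

void operation_begin(void)
{
  pthread_mutex_lock(&pending_lock);
  pending++;
  pthread_mutex_unlock(&pending_lock);
}

void operation_end(void)
{
  pthread_mutex_lock(&pending_lock);
  if (--pending == 0)
    pthread_cond_broadcast(&pending_cond);
  pthread_mutex_unlock(&pending_lock);
}

void wait_until_idle(void)
{
  pthread_mutex_lock(&pending_lock);
  while (pending)
    pthread_cond_wait(&pending_cond, &pending_lock);
  pthread_mutex_unlock(&pending_lock);
}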
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 525aa65bc8e..95c81258216 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1672,7 +1672,7 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
ev = new Execute_load_log_event(buf, event_len, description_event);
break;
case START_EVENT_V3: /* this is sent only by MySQL <=4.x */
- ev = new Start_log_event_v3(buf, description_event);
+ ev = new Start_log_event_v3(buf, event_len, description_event);
break;
case STOP_EVENT:
ev = new Stop_log_event(buf, description_event);
@@ -4691,11 +4691,16 @@ void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
Start_log_event_v3::Start_log_event_v3()
*/
-Start_log_event_v3::Start_log_event_v3(const char* buf,
+Start_log_event_v3::Start_log_event_v3(const char* buf, uint event_len,
const Format_description_log_event
*description_event)
- :Log_event(buf, description_event)
+ :Log_event(buf, description_event), binlog_version(BINLOG_VERSION)
{
+ if (event_len < LOG_EVENT_MINIMAL_HEADER_LEN + ST_COMMON_HEADER_LEN_OFFSET)
+ {
+ server_version[0]= 0;
+ return;
+ }
buf+= LOG_EVENT_MINIMAL_HEADER_LEN;
binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET);
memcpy(server_version, buf+ST_SERVER_VER_OFFSET,
@@ -5000,9 +5005,12 @@ Format_description_log_event(const char* buf,
const
Format_description_log_event*
description_event)
- :Start_log_event_v3(buf, description_event), event_type_permutation(0)
+ :Start_log_event_v3(buf, event_len, description_event),
+ common_header_len(0), post_header_len(NULL), event_type_permutation(0)
{
DBUG_ENTER("Format_description_log_event::Format_description_log_event(char*,...)");
+ if (!Start_log_event_v3::is_valid())
+ DBUG_VOID_RETURN; /* sanity check */
buf+= LOG_EVENT_MINIMAL_HEADER_LEN;
if ((common_header_len=buf[ST_COMMON_HEADER_LEN_OFFSET]) < OLD_HEADER_LEN)
DBUG_VOID_RETURN; /* sanity check */
diff --git a/sql/log_event.h b/sql/log_event.h
index c0370014c7d..6a3e6f174bb 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -2515,7 +2515,7 @@ public:
void print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
- Start_log_event_v3(const char* buf,
+ Start_log_event_v3(const char* buf, uint event_len,
const Format_description_log_event* description_event);
~Start_log_event_v3() {}
Log_event_type get_type_code() { return START_EVENT_V3;}
@@ -2524,7 +2524,7 @@ public:
#ifdef MYSQL_SERVER
bool write(IO_CACHE* file);
#endif
- bool is_valid() const { return 1; }
+ bool is_valid() const { return server_version[0] != 0; }
int get_data_size()
{
return START_V3_HEADER_LEN; //no variable-sized part
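
The Start_log_event_v3 constructor now checks the event length before reading any fixed offsets and records failure in server_version[0], which is exactly what the new is_valid() tests, so a truncated event is rejected instead of being parsed out of bounds. A reduced sketch of that parse-then-validate shape (structure and offsets invented for illustration):

#include <string.h>

struct event_sketch
{
  char           version[64];     /* version[0] == 0 marks "invalid" */
  unsigned short binlog_version;
};

enum { HDR_LEN= 19, VER_OFF= HDR_LEN + 2, VER_LEN= 50 };

static void parse_event(struct event_sketch *ev,
                        const unsigned char *buf, size_t len)
{
  ev->binlog_version= 0;
  if (len < VER_OFF + VER_LEN)         /* too short: mark invalid and stop */
  {
    ev->version[0]= 0;
    return;
  }
  ev->binlog_version= (unsigned short) (buf[HDR_LEN] | (buf[HDR_LEN + 1] << 8));
  memcpy(ev->version, buf + VER_OFF, VER_LEN);
  ev->version[VER_LEN]= 0;
}

static int event_is_valid(const struct event_sketch *ev)
{
  return ev->version[0] != 0;
}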
diff --git a/sql/mdl.cc b/sql/mdl.cc
index 5755b2bbfd5..4c962d3c570 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -2517,6 +2517,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
break;
case MDL_wait::KILLED:
+ get_thd()->send_kill_message();
break;
default:
DBUG_ASSERT(0);
diff --git a/sql/mdl.h b/sql/mdl.h
index 639a8966b33..421258c2ab7 100644
--- a/sql/mdl.h
+++ b/sql/mdl.h
@@ -351,7 +351,7 @@ public:
NAME_LEN) - m_ptr + 1);
m_hash_value= my_hash_sort(&my_charset_bin, (uchar*) m_ptr + 1,
m_length - 1);
- DBUG_ASSERT(ok_for_lower_case_names(db));
+ DBUG_ASSERT(mdl_namespace == USER_LOCK || ok_for_lower_case_names(db));
}
void mdl_key_init(const MDL_key *rhs)
{
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 84e2ae56fa4..19f067f582f 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -6254,6 +6254,7 @@ int mysqld_main(int argc, char **argv)
(char*) "" : mysqld_unix_port),
mysqld_port,
MYSQL_COMPILATION_COMMENT);
+ fclose(stdin);
#if defined(_WIN32) && !defined(EMBEDDED_LIBRARY)
Service.SetRunning();
#endif
@@ -10266,6 +10267,8 @@ PSI_stage_info stage_sql_thd_waiting_until_delay= { 0, "Waiting until MASTER_DEL
PSI_stage_info stage_storing_result_in_query_cache= { 0, "storing result in query cache", 0};
PSI_stage_info stage_storing_row_into_queue= { 0, "storing row into queue", 0};
PSI_stage_info stage_system_lock= { 0, "System lock", 0};
+PSI_stage_info stage_table_lock= { 0, "Table lock", 0};
+PSI_stage_info stage_filling_schema_table= { 0, "Filling schema table", 0};
PSI_stage_info stage_update= { 0, "update", 0};
PSI_stage_info stage_updating= { 0, "updating", 0};
PSI_stage_info stage_updating_main_table= { 0, "updating main table", 0};
@@ -10399,6 +10402,8 @@ PSI_stage_info *all_server_stages[]=
& stage_storing_result_in_query_cache,
& stage_storing_row_into_queue,
& stage_system_lock,
+ & stage_table_lock,
+ & stage_filling_schema_table,
& stage_update,
& stage_updating,
& stage_updating_main_table,
diff --git a/sql/mysqld.h b/sql/mysqld.h
index 860800cb725..a9eb4fd1ca5 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -418,6 +418,8 @@ extern PSI_stage_info stage_statistics;
extern PSI_stage_info stage_storing_result_in_query_cache;
extern PSI_stage_info stage_storing_row_into_queue;
extern PSI_stage_info stage_system_lock;
+extern PSI_stage_info stage_table_lock;
+extern PSI_stage_info stage_filling_schema_table;
extern PSI_stage_info stage_update;
extern PSI_stage_info stage_updating;
extern PSI_stage_info stage_updating_main_table;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index a5c27fa66e2..3597ade2cba 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -13167,12 +13167,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
SYNOPSIS
check_group_min_max_predicates()
- cond [in] the expression tree being analyzed
- min_max_arg [in] the field referenced by the MIN/MAX function(s)
- image_type [in]
- has_min_max_arg [out] true if the subtree being analyzed references min_max_arg
- has_other_arg [out] true if the subtree being analyzed references a column
- other min_max_arg
+ cond [in] the expression tree being analyzed
+ min_max_arg [in] the field referenced by the MIN/MAX function(s)
+ image_type [in]
+ has_min_max_arg [out] true if the subtree being analyzed references
+ min_max_arg
+ has_other_arg [out] true if the subtree being analyzed references a
+ column other than min_max_arg
DESCRIPTION
The function walks recursively over the cond tree representing a WHERE
@@ -13216,7 +13217,7 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
(2) the subtree passes the test, but it is an OR and it references both
the min/max argument and other columns.
*/
- if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item, //1
+ if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item, //1
image_type,
&has_min_max, &has_other) ||
(func_type == Item_func::COND_OR_FUNC && has_min_max && has_other))//2
@@ -13232,7 +13233,7 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
a subquery in the WHERE clause.
*/
- if (cond_type == Item::SUBSELECT_ITEM)
+ if (unlikely(cond_type == Item::SUBSELECT_ITEM))
{
Item_subselect *subs_cond= (Item_subselect*) cond;
if (subs_cond->is_correlated)
@@ -13249,7 +13250,14 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
}
DBUG_RETURN(TRUE);
}
-
+ /*
+ Subquery with IS [NOT] NULL
+ TODO: Look into the cache_item and optimize it like we do for
+ subselects above
+ */
+ if (unlikely(cond_type == Item::CACHE_ITEM))
+ DBUG_RETURN(cond->const_item());
+
/*
Condition of the form 'field' is equivalent to 'field <> 0' and thus
satisfies the SA3 condition.
@@ -13266,7 +13274,9 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
/* We presume that at this point there are no other Items than functions. */
DBUG_ASSERT(cond_type == Item::FUNC_ITEM);
-
+ if (unlikely(cond_type != Item::FUNC_ITEM)) /* Safety */
+ DBUG_RETURN(FALSE);
+
/* Test if cond references only group-by or non-group fields. */
Item_func *pred= (Item_func*) cond;
Item_func::Functype pred_type= pred->functype();
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index 7c273d51a19..46c3e4aaaf4 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -106,9 +106,10 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev)
static void
-finish_event_group(THD *thd, uint64 sub_id, rpl_parallel_entry *entry,
- rpl_group_info *rgi)
+finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
+ rpl_parallel_entry *entry, rpl_group_info *rgi)
{
+ THD *thd= rpt->thd;
wait_for_commit *wfc= &rgi->commit_orderer;
int err;
@@ -139,25 +140,47 @@ finish_event_group(THD *thd, uint64 sub_id, rpl_parallel_entry *entry,
signal_error_to_sql_driver_thread(thd, rgi, err);
thd->wait_for_commit_ptr= NULL;
+ mysql_mutex_lock(&entry->LOCK_parallel_entry);
/*
- Record that this event group has finished (eg. transaction is
- committed, if transactional), so other event groups will no longer
- attempt to wait for us to commit. Once we have increased
- entry->last_committed_sub_id, no other threads will execute
- register_wait_for_prior_commit() against us. Thus, by doing one
- extra (usually redundant) wakeup_subsequent_commits() we can ensure
- that no register_wait_for_prior_commit() can ever happen without a
- subsequent wakeup_subsequent_commits() to wake it up.
-
- We can race here with the next transactions, but that is fine, as
- long as we check that we do not decrease last_committed_sub_id. If
- this commit is done, then any prior commits will also have been
- done and also no longer need waiting for.
+ We need to mark that this event group started its commit phase, in case we
+ missed it before (otherwise we would deadlock the next event group that is
+ waiting for this). In most cases (normal DML), it will be a no-op.
*/
- mysql_mutex_lock(&entry->LOCK_parallel_entry);
+ rgi->mark_start_commit_no_lock();
+
if (entry->last_committed_sub_id < sub_id)
+ {
+ /*
+ Record that this event group has finished (eg. transaction is
+ committed, if transactional), so other event groups will no longer
+ attempt to wait for us to commit. Once we have increased
+ entry->last_committed_sub_id, no other threads will execute
+ register_wait_for_prior_commit() against us. Thus, by doing one
+ extra (usually redundant) wakeup_subsequent_commits() we can ensure
+ that no register_wait_for_prior_commit() can ever happen without a
+ subsequent wakeup_subsequent_commits() to wake it up.
+
+ We can race here with the next transactions, but that is fine, as
+ long as we check that we do not decrease last_committed_sub_id. If
+ this commit is done, then any prior commits will also have been
+ done and also no longer need waiting for.
+ */
entry->last_committed_sub_id= sub_id;
+ /* Now free any GCOs in which all transactions have committed. */
+ group_commit_orderer *tmp_gco= rgi->gco;
+ while (tmp_gco &&
+ (!tmp_gco->next_gco || tmp_gco->last_sub_id > sub_id))
+ tmp_gco= tmp_gco->prev_gco;
+ while (tmp_gco)
+ {
+ group_commit_orderer *prev_gco= tmp_gco->prev_gco;
+ tmp_gco->next_gco->prev_gco= NULL;
+ rpt->loc_free_gco(tmp_gco);
+ tmp_gco= prev_gco;
+ }
+ }
+
/*
If this event group got error, then any following event groups that have
not yet started should just skip their group, preparing for stop of the
@@ -166,12 +189,6 @@ finish_event_group(THD *thd, uint64 sub_id, rpl_parallel_entry *entry,
if (unlikely(rgi->worker_error) &&
entry->stop_on_error_sub_id == (uint64)ULONGLONG_MAX)
entry->stop_on_error_sub_id= sub_id;
- /*
- We need to mark that this event group started its commit phase, in case we
- missed it before (otherwise we would deadlock the next event group that is
- waiting for this). In most cases (normal DML), it will be a no-op.
- */
- rgi->mark_start_commit_no_lock();
mysql_mutex_unlock(&entry->LOCK_parallel_entry);
thd->clear_error();
@@ -329,6 +346,7 @@ do_retry:
until after the unmark.
*/
rgi->unmark_start_commit();
+ DEBUG_SYNC(thd, "rpl_parallel_retry_after_unmark");
/*
We might get the deadlock error that causes the retry during commit, while
@@ -517,7 +535,7 @@ handle_rpl_parallel_thread(void *arg)
bool in_event_group= false;
bool skip_event_group= false;
rpl_group_info *group_rgi= NULL;
- group_commit_orderer *gco, *tmp_gco;
+ group_commit_orderer *gco;
uint64 event_gtid_sub_id= 0;
rpl_sql_thread_info sql_info(NULL);
int err;
@@ -610,7 +628,7 @@ handle_rpl_parallel_thread(void *arg)
*/
group_rgi->cleanup_context(thd, 1);
in_event_group= false;
- finish_event_group(thd, group_rgi->gtid_sub_id,
+ finish_event_group(rpt, group_rgi->gtid_sub_id,
qev->entry_for_queued, group_rgi);
rpt->loc_free_rgi(group_rgi);
@@ -631,6 +649,14 @@ handle_rpl_parallel_thread(void *arg)
PSI_stage_info old_stage;
uint64 wait_count;
+ DBUG_EXECUTE_IF("rpl_parallel_scheduled_gtid_0_x_100", {
+ if (rgi->current_gtid.domain_id == 0 &&
+ rgi->current_gtid.seq_no == 100) {
+ debug_sync_set_action(thd,
+ STRING_WITH_LEN("now SIGNAL scheduled_gtid_0_x_100"));
+ }
+ });
+
in_event_group= true;
/*
If the standalone flag is set, then this event group consists of a
@@ -656,8 +682,12 @@ handle_rpl_parallel_thread(void *arg)
mysql_mutex_lock(&entry->LOCK_parallel_entry);
if (!gco->installed)
{
- if (gco->prev_gco)
- gco->prev_gco->next_gco= gco;
+ group_commit_orderer *prev_gco= gco->prev_gco;
+ if (prev_gco)
+ {
+ prev_gco->last_sub_id= gco->prior_sub_id;
+ prev_gco->next_gco= gco;
+ }
gco->installed= true;
}
wait_count= gco->wait_count;
@@ -674,6 +704,8 @@ handle_rpl_parallel_thread(void *arg)
if (thd->check_killed() && !rgi->worker_error)
{
DEBUG_SYNC(thd, "rpl_parallel_start_waiting_for_prior_killed");
+ thd->clear_error();
+ thd->get_stmt_da()->reset_diagnostics_area();
thd->send_kill_message();
slave_output_error_info(rgi, thd);
signal_error_to_sql_driver_thread(thd, rgi, 1);
@@ -690,18 +722,6 @@ handle_rpl_parallel_thread(void *arg)
} while (wait_count > entry->count_committing_event_groups);
}
- if ((tmp_gco= gco->prev_gco))
- {
- /*
- Now all the event groups in the previous batch have entered their
- commit phase, and will no longer access their gco. So we can free
- it here.
- */
- DBUG_ASSERT(!tmp_gco->prev_gco);
- gco->prev_gco= NULL;
- rpt->loc_free_gco(tmp_gco);
- }
-
if (entry->force_abort && wait_count > entry->stop_count)
{
/*
@@ -765,6 +785,7 @@ handle_rpl_parallel_thread(void *arg)
{
DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit");
rgi->mark_start_commit();
+ DEBUG_SYNC(thd, "rpl_parallel_after_mark_start_commit");
}
/*
@@ -785,6 +806,7 @@ handle_rpl_parallel_thread(void *arg)
thd->get_stmt_da()->reset_diagnostics_area();
my_error(ER_LOCK_DEADLOCK, MYF(0));
err= 1;
+ DEBUG_SYNC(thd, "rpl_parallel_simulate_temp_err_xid");
});
if (!err)
#endif
@@ -824,7 +846,7 @@ handle_rpl_parallel_thread(void *arg)
if (end_of_group)
{
in_event_group= false;
- finish_event_group(thd, event_gtid_sub_id, entry, rgi);
+ finish_event_group(rpt, event_gtid_sub_id, entry, rgi);
rpt->loc_free_rgi(rgi);
thd->rgi_slave= group_rgi= rgi= NULL;
skip_event_group= false;
@@ -865,7 +887,7 @@ handle_rpl_parallel_thread(void *arg)
*/
mysql_mutex_unlock(&rpt->LOCK_rpl_thread);
signal_error_to_sql_driver_thread(thd, group_rgi, 1);
- finish_event_group(thd, group_rgi->gtid_sub_id,
+ finish_event_group(rpt, group_rgi->gtid_sub_id,
group_rgi->parallel_entry, group_rgi);
in_event_group= false;
mysql_mutex_lock(&rpt->LOCK_rpl_thread);
@@ -914,7 +936,6 @@ handle_rpl_parallel_thread(void *arg)
static void
dealloc_gco(group_commit_orderer *gco)
{
- DBUG_ASSERT(!gco->prev_gco /* Must only free after dealloc previous */);
mysql_cond_destroy(&gco->COND_group_commit_orderer);
my_free(gco);
}
@@ -1123,9 +1144,9 @@ rpl_parallel_thread::inuse_relaylog_refcount_update()
inuse_relaylog *ir= accumulated_ir_last;
if (ir)
{
- my_atomic_rwlock_wrlock(&ir->rli->inuse_relaylog_atomic_lock);
+ my_atomic_rwlock_wrlock(&ir->inuse_relaylog_atomic_lock);
my_atomic_add64(&ir->dequeued_count, accumulated_ir_count);
- my_atomic_rwlock_wrunlock(&ir->rli->inuse_relaylog_atomic_lock);
+ my_atomic_rwlock_wrunlock(&ir->inuse_relaylog_atomic_lock);
accumulated_ir_count= 0;
accumulated_ir_last= NULL;
}
@@ -1295,7 +1316,8 @@ rpl_parallel_thread::free_rgi(rpl_group_info *rgi)
group_commit_orderer *
-rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev)
+rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev,
+ uint64 prior_sub_id)
{
group_commit_orderer *gco;
mysql_mutex_assert_owner(&LOCK_rpl_thread);
@@ -1311,6 +1333,7 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev)
gco->wait_count= wait_count;
gco->prev_gco= prev;
gco->next_gco= NULL;
+ gco->prior_sub_id= prior_sub_id;
gco->installed= false;
return gco;
}
@@ -1319,7 +1342,6 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev)
void
rpl_parallel_thread::loc_free_gco(group_commit_orderer *gco)
{
- DBUG_ASSERT(!gco->prev_gco /* Must not free until wait has completed. */);
if (!loc_gco_list)
loc_gco_last_ptr_ptr= &gco->next_gco;
else
@@ -1526,8 +1548,12 @@ static void
free_rpl_parallel_entry(void *element)
{
rpl_parallel_entry *e= (rpl_parallel_entry *)element;
- if (e->current_gco)
+ while (e->current_gco)
+ {
+ group_commit_orderer *prev_gco= e->current_gco->prev_gco;
dealloc_gco(e->current_gco);
+ e->current_gco= prev_gco;
+ }
mysql_cond_destroy(&e->COND_parallel_entry);
mysql_mutex_destroy(&e->LOCK_parallel_entry);
my_free(e);
@@ -1999,7 +2025,7 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
uint64 count= e->count_queued_event_groups;
group_commit_orderer *gco;
- if (!(gco= cur_thread->get_gco(count, e->current_gco)))
+ if (!(gco= cur_thread->get_gco(count, e->current_gco, e->current_sub_id)))
{
cur_thread->free_rgi(rgi);
cur_thread->free_qev(qev);
diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h
index 239818855b8..2604cd98527 100644
--- a/sql/rpl_parallel.h
+++ b/sql/rpl_parallel.h
@@ -39,9 +39,12 @@ struct inuse_relaylog;
rpl_parallel_entry::count_committing_event_groups has reached
gco->next_gco->wait_count.
- - When gco->wait_count is reached for a worker and the wait completes,
- the worker frees gco->prev_gco; at this point it is guaranteed not to
- be needed any longer.
+ - The gco lives until all its event groups have completed their commit.
+ This is detected by rpl_parallel_entry::last_committed_sub_id being
+ greater than or equal to gco->last_sub_id. Once this happens, the gco is
+ freed. Note that since the update of last_committed_sub_id can happen
+ out-of-order, the thread that frees a given gco can be executing any later
+ event group, not necessarily an event group from the gco being freed.
*/
struct group_commit_orderer {
/* Wakeup condition, used with rpl_parallel_entry::LOCK_parallel_entry. */
@@ -49,6 +52,16 @@ struct group_commit_orderer {
uint64 wait_count;
group_commit_orderer *prev_gco;
group_commit_orderer *next_gco;
+ /*
+ The sub_id of the last event group in the previous GCO.
+ Only valid if prev_gco != NULL.
+ */
+ uint64 prior_sub_id;
+ /*
+ The sub_id of the last event group in this GCO. Only valid when next_gco
+ is non-NULL.
+ */
+ uint64 last_sub_id;
bool installed;
};
@@ -168,7 +181,8 @@ struct rpl_parallel_thread {
LOCK_rpl_thread mutex.
*/
void free_rgi(rpl_group_info *rgi);
- group_commit_orderer *get_gco(uint64 wait_count, group_commit_orderer *prev);
+ group_commit_orderer *get_gco(uint64 wait_count, group_commit_orderer *prev,
+ uint64 first_sub_id);
/*
Put a gco on the local free list, to be later released to the global free
list by batch_free().
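
Editorial note: to make the new lifetime rule from the comment block above concrete, here is a hedged sketch of the freeing condition (hypothetical helper, not code from this patch): a GCO whose last_sub_id has been committed is no longer needed, and the condition is only meaningful once a following GCO exists.

#include <cstdint>

// Hypothetical, simplified stand-in for group_commit_orderer.
struct gco
{
  gco     *prev_gco;
  uint64_t last_sub_id;   // only meaningful once a following GCO exists
  bool     has_next;
};

// Sketch of the rule described above: the GCO can be freed once every
// event group belonging to it has completed its commit.
static bool gco_is_done(const gco *g, uint64_t last_committed_sub_id)
{
  return g->has_next && last_committed_sub_id >= g->last_sub_id;
}
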
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index d21ebd494c1..a751dd16650 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -986,11 +986,11 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
if (rgi->is_parallel_exec)
{
/* In case of parallel replication, do not update the position backwards. */
- int cmp= strcmp(group_relay_log_name, event_relay_log_name);
+ int cmp= strcmp(group_relay_log_name, rgi->event_relay_log_name);
if (cmp < 0)
{
group_relay_log_pos= rgi->future_event_relay_log_pos;
- strmake_buf(group_relay_log_name, event_relay_log_name);
+ strmake_buf(group_relay_log_name, rgi->event_relay_log_name);
notify_group_relay_log_name_update();
} else if (cmp == 0 && group_relay_log_pos < rgi->future_event_relay_log_pos)
group_relay_log_pos= rgi->future_event_relay_log_pos;
@@ -1717,6 +1717,11 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)
trans_rollback_stmt(thd); // if a "statement transaction"
/* trans_rollback() also resets OPTION_GTID_BEGIN */
trans_rollback(thd); // if a "real transaction"
+ /*
+ Now that we have rolled back the transaction, make sure we do not
+ erroneously update the GTID position.
+ */
+ gtid_pending= false;
}
m_table_map.clear_tables();
slave_close_thread_tables(thd);
@@ -1844,11 +1849,20 @@ void rpl_group_info::slave_close_thread_tables(THD *thd)
static void
-mark_start_commit_inner(rpl_parallel_entry *e, group_commit_orderer *gco)
+mark_start_commit_inner(rpl_parallel_entry *e, group_commit_orderer *gco,
+ rpl_group_info *rgi)
{
+ group_commit_orderer *tmp;
uint64 count= ++e->count_committing_event_groups;
- if (gco->next_gco && gco->next_gco->wait_count == count)
- mysql_cond_broadcast(&gco->next_gco->COND_group_commit_orderer);
+ /* Signal any following GCOs whose wait_count has now been reached. */
+ tmp= gco;
+ while ((tmp= tmp->next_gco))
+ {
+ uint64 wait_count= tmp->wait_count;
+ if (wait_count > count)
+ break;
+ mysql_cond_broadcast(&tmp->COND_group_commit_orderer);
+ }
}
@@ -1857,7 +1871,7 @@ rpl_group_info::mark_start_commit_no_lock()
{
if (did_mark_start_commit)
return;
- mark_start_commit_inner(parallel_entry, gco);
+ mark_start_commit_inner(parallel_entry, gco, this);
did_mark_start_commit= true;
}
@@ -1872,7 +1886,7 @@ rpl_group_info::mark_start_commit()
e= this->parallel_entry;
mysql_mutex_lock(&e->LOCK_parallel_entry);
- mark_start_commit_inner(e, gco);
+ mark_start_commit_inner(e, gco, this);
mysql_mutex_unlock(&e->LOCK_parallel_entry);
did_mark_start_commit= true;
}
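
Editorial note: the rewritten mark_start_commit_inner() no longer wakes only the immediate next GCO; it walks forward and signals every queued GCO whose wait_count is already satisfied by the new count. A simplified sketch of that walk (hypothetical notify() callback standing in for mysql_cond_broadcast()):

#include <cstdint>
#include <functional>

struct gco
{
  gco     *next_gco;
  uint64_t wait_count;
};

// Walk the chain of following GCOs and notify each one whose wait_count
// has been reached by the incremented commit counter.
static void signal_ready_gcos(gco *current, uint64_t count,
                              const std::function<void(gco *)> &notify)
{
  for (gco *tmp= current->next_gco; tmp; tmp= tmp->next_gco)
  {
    if (tmp->wait_count > count)
      break;                       // later GCOs wait for even more commits
    notify(tmp);                   // stands in for mysql_cond_broadcast()
  }
}
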
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
index 9885417aa3f..fb4e3261468 100644
--- a/sql/rpl_rli.h
+++ b/sql/rpl_rli.h
@@ -563,6 +563,10 @@ struct rpl_group_info
(When we execute in parallel the transactions that group committed
together on the master, we still need to wait for any prior transactions
to have reached the commit stage).
+
+ The pointed-to gco is only valid for as long as
+ gtid_sub_id < parallel_entry->last_committed_sub_id. After that, it can
+ be freed by another thread.
*/
group_commit_orderer *gco;
diff --git a/sql/slave.cc b/sql/slave.cc
index c569499fdcf..237c1c57ccc 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -5608,7 +5608,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
char str_buf[128];
String str(str_buf, sizeof(str_buf), system_charset_info);
mi->rli.until_gtid_pos.to_string(&str);
- sql_print_information("Slave IO thread stops because it reached its"
+ sql_print_information("Slave I/O thread stops because it reached its"
" UNTIL master_gtid_pos %s", str.c_ptr_safe());
mi->abort_slave= true;
}
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 41639ea352f..5ab03388f01 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -3293,6 +3293,14 @@ public:
mysql_mutex_unlock(&LOCK_thd_data);
}
}
+ inline void reset_kill_query()
+ {
+ if (killed < KILL_CONNECTION)
+ {
+ reset_killed();
+ mysys_var->abort= 0;
+ }
+ }
inline void send_kill_message() const
{
int err= killed_errno();
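
Editorial note: the new THD::reset_kill_query() wrapper centralizes the existing pattern of clearing a soft (query-level) kill and the abort flag while leaving a connection-level kill in place. A minimal sketch of the idea with a hypothetical Session type, assuming query-level kill values order below KILL_CONNECTION as in the server:

enum kill_level { NOT_KILLED= 0, KILL_QUERY= 1, KILL_CONNECTION= 2 };

struct Session
{
  kill_level killed= NOT_KILLED;
  bool abort= false;

  // Mirror of the reset_kill_query() idea: only soft kills are cleared.
  void reset_kill_query()
  {
    if (killed < KILL_CONNECTION)
    {
      killed= NOT_KILLED;
      abort= false;
    }
  }
};
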
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index c842bed9ac2..77a45cbae59 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1762,7 +1762,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
table->file->adjust_next_insert_id_after_explicit_value(table->next_number_field->val_int());
}
- else
+ else if (prev_insert_id_for_cur_row)
{
table->file->restore_auto_increment(prev_insert_id_for_cur_row);
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index b389e591bc0..87810a65b0f 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1297,6 +1297,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->enable_slow_log= TRUE;
thd->query_plan_flags= QPLAN_INIT;
thd->lex->sql_command= SQLCOM_END; /* to avoid confusing VIEW detectors */
+ thd->reset_kill_query();
DEBUG_SYNC(thd,"dispatch_command_before_set_time");
@@ -5560,11 +5561,7 @@ finish:
if (! thd->get_stmt_da()->is_set())
thd->send_kill_message();
}
- if (thd->killed < KILL_CONNECTION)
- {
- thd->reset_killed();
- thd->mysys_var->abort= 0;
- }
+ thd->reset_kill_query();
}
if (thd->is_error() || (thd->variables.option_bits & OPTION_MASTER_SQL_ERROR))
trans_rollback_stmt(thd);
@@ -6456,6 +6453,115 @@ bool check_global_access(THD *thd, ulong want_access, bool no_errors)
#endif
}
+
+/**
+ Checks foreign key's parent table access.
+
+ @param thd [in] Thread handler
+ @param create_info [in] Create information (like MAX_ROWS, ENGINE or
+ temporary table flag)
+ @param alter_info [in] Initial list of columns and indexes for the
+ table to be created
+
+ @retval
+ false ok.
+ @retval
+ true error or access denied. Error is sent to client in this case.
+*/
+bool check_fk_parent_table_access(THD *thd,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info)
+{
+ Key *key;
+ List_iterator<Key> key_iterator(alter_info->key_list);
+
+ while ((key= key_iterator++))
+ {
+ if (key->type == Key::FOREIGN_KEY)
+ {
+ TABLE_LIST parent_table;
+ bool is_qualified_table_name;
+ Foreign_key *fk_key= (Foreign_key *)key;
+ LEX_STRING db_name;
+ LEX_STRING table_name= { fk_key->ref_table.str,
+ fk_key->ref_table.length };
+ const ulong privileges= (SELECT_ACL | INSERT_ACL | UPDATE_ACL |
+ DELETE_ACL | REFERENCES_ACL);
+
+ // Check whether the table name is valid.
+ DBUG_ASSERT(table_name.str != NULL);
+ if (check_table_name(table_name.str, table_name.length, false))
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), table_name.str);
+ return true;
+ }
+
+ if (fk_key->ref_db.str)
+ {
+ is_qualified_table_name= true;
+ db_name.str= (char *) thd->memdup(fk_key->ref_db.str,
+ fk_key->ref_db.length+1);
+ db_name.length= fk_key->ref_db.length;
+
+ // Check whether the database name is valid.
+ if (fk_key->ref_db.str && check_db_name(&db_name))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), db_name.str);
+ return true;
+ }
+ }
+ else if (thd->lex->copy_db_to(&db_name.str, &db_name.length))
+ return true;
+ else
+ is_qualified_table_name= false;
+
+ // If lower_case_table_names is set, convert the table name to lower case.
+ if (lower_case_table_names)
+ {
+ table_name.str= (char *) thd->memdup(fk_key->ref_table.str,
+ fk_key->ref_table.length+1);
+ table_name.length= my_casedn_str(files_charset_info, table_name.str);
+ }
+
+ parent_table.init_one_table(db_name.str, db_name.length,
+ table_name.str, table_name.length,
+ table_name.str, TL_IGNORE);
+
+ /*
+ Check if the user has any of the "privileges" at the table level on
+ "parent_table".
+ Having a privilege on any of the parent_table columns is not enough,
+ so we only check here whether the user has any of the "privileges"
+ at the table level.
+ */
+ if (check_some_access(thd, privileges, &parent_table) ||
+ parent_table.grant.want_privilege)
+ {
+ if (is_qualified_table_name)
+ {
+ const size_t qualified_table_name_len= NAME_LEN + 1 + NAME_LEN + 1;
+ char *qualified_table_name= (char *) thd->alloc(qualified_table_name_len);
+
+ my_snprintf(qualified_table_name, qualified_table_name_len, "%s.%s",
+ db_name.str, table_name.str);
+ table_name.str= qualified_table_name;
+ }
+
+ my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0),
+ "REFERENCES",
+ thd->security_ctx->priv_user,
+ thd->security_ctx->host_or_ip,
+ table_name.str);
+
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+
/****************************************************************************
Check stack size; Send error if there isn't enough stack to continue
****************************************************************************/
@@ -7995,7 +8101,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user,
host.str[0] == '%' means that host name was not given. See sql_yacc.yy
*/
if (((user->host.str[0] == '%' && !user->host.str[1]) ||
- !strcmp(tmp->security_ctx->host, user->host.str)) &&
+ !strcmp(tmp->security_ctx->host_or_ip, user->host.str)) &&
!strcmp(tmp->security_ctx->user, user->user.str))
{
if (!(thd->security_ctx->master_access & SUPER_ACL) &&
@@ -8651,7 +8757,9 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables,
if (check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE))
goto err;
}
- error= FALSE;
+
+ if (check_fk_parent_table_access(thd, &lex->create_info, &lex->alter_info))
+ goto err;
/*
For CREATE TABLE we should not open the table even if it exists.
@@ -8659,6 +8767,8 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables,
*/
lex->query_tables->open_strategy= TABLE_LIST::OPEN_STUB;
+ error= FALSE;
+
err:
DBUG_RETURN(error);
}
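
Editorial note: check_fk_parent_table_access() accepts the foreign key if the user holds at least one of SELECT, INSERT, UPDATE, DELETE or REFERENCES at the table level on the parent table. A rough sketch of the acceptance test and of the qualified-name formatting used in the error message (hypothetical bit values, not the server's ACL constants):

#include <cstdio>

// Hypothetical privilege bits for illustration only.
enum { SELECT_ACL= 1, INSERT_ACL= 2, UPDATE_ACL= 4,
       DELETE_ACL= 8, REFERENCES_ACL= 16 };

static bool may_reference_parent(unsigned long table_level_privs)
{
  const unsigned long wanted= SELECT_ACL | INSERT_ACL | UPDATE_ACL |
                              DELETE_ACL | REFERENCES_ACL;
  // Any one of the listed privileges is enough; column privileges are not.
  return (table_level_privs & wanted) != 0;
}

// "db.table" string used when reporting the access-denied error.
static void qualify(char *buf, size_t len, const char *db, const char *table)
{
  snprintf(buf, len, "%s.%s", db, table);
}
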
diff --git a/sql/sql_parse.h b/sql/sql_parse.h
index 773ede9edee..5e46c881510 100644
--- a/sql/sql_parse.h
+++ b/sql/sql_parse.h
@@ -46,6 +46,9 @@ bool delete_precheck(THD *thd, TABLE_LIST *tables);
bool insert_precheck(THD *thd, TABLE_LIST *tables);
bool create_table_precheck(THD *thd, TABLE_LIST *tables,
TABLE_LIST *create_table);
+bool check_fk_parent_table_access(THD *thd,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info);
bool parse_sql(THD *thd, Parser_state *parser_state,
Object_creation_ctx *creation_ctx, bool do_pfs_digest=false);
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 34ad2ff2615..b5a849f7fd4 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -3967,7 +3967,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
we copy string values to a plugin's memroot.
*/
if (mysqld_server_started &&
- ((o->flags & (PLUGIN_VAR_STR | PLUGIN_VAR_NOCMDOPT |
+ ((o->flags & (PLUGIN_VAR_TYPEMASK | PLUGIN_VAR_NOCMDOPT |
PLUGIN_VAR_MEMALLOC)) == PLUGIN_VAR_STR))
{
sysvar_str_t* str= (sysvar_str_t *)o;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 8f68c929a9e..ed56191c32b 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -291,18 +291,18 @@ static double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
void dbug_serve_apcs(THD *thd, int n_calls)
{
const char *save_proc_info= thd->proc_info;
- /* This is so that mysqltest knows we're ready to serve requests: */
- thd_proc_info(thd, "show_explain_trap");
/* Busy-wait for n_calls APC requests to arrive and be processed */
int n_apcs= thd->apc_target.n_calls_processed + n_calls;
while (thd->apc_target.n_calls_processed < n_apcs)
{
- my_sleep(300);
+ /* This is so that mysqltest knows we're ready to serve requests: */
+ thd_proc_info(thd, "show_explain_trap");
+ my_sleep(30000);
+ thd_proc_info(thd, save_proc_info);
if (thd->check_killed())
break;
}
- thd_proc_info(thd, save_proc_info);
}
@@ -3032,6 +3032,7 @@ void JOIN::exec_inner()
const ha_rows select_limit_arg=
select_options & OPTION_FOUND_ROWS
? HA_POS_ERROR : unit->select_limit_cnt;
+ curr_join->filesort_found_rows= filesort_limit_arg != HA_POS_ERROR;
DBUG_PRINT("info", ("has_group_by %d "
"curr_join->table_count %d "
@@ -3079,7 +3080,8 @@ void JOIN::exec_inner()
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
error= do_select(curr_join, curr_fields_list, NULL, procedure);
thd->limit_found_rows= curr_join->send_records;
- if (curr_join->order && curr_join->filesort_found_rows)
+ if (curr_join->order && curr_join->sortorder &&
+ curr_join->filesort_found_rows)
{
/* Use info provided by filesort. */
DBUG_ASSERT(curr_join->table_count > curr_join->const_tables);
@@ -9989,7 +9991,7 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys)
else
{
/* Mark keyuses for this key to be excluded */
- for (KEYUSE *curr=save_first_keyuse; curr < first_keyuse; curr++)
+ for (KEYUSE *curr=save_first_keyuse; curr < keyuse; curr++)
{
curr->key= MAX_KEY;
}
@@ -12213,8 +12215,8 @@ public:
{ TRASH(ptr, size); }
Item *and_level;
- Item_func *cmp_func;
- COND_CMP(Item *a,Item_func *b) :and_level(a),cmp_func(b) {}
+ Item_bool_func2 *cmp_func;
+ COND_CMP(Item *a,Item_bool_func2 *b) :and_level(a),cmp_func(b) {}
};
/**
@@ -13603,6 +13605,75 @@ static void update_const_equal_items(COND *cond, JOIN_TAB *tab, bool const_key)
}
+/**
+ Check if
+ WHERE expr=value AND expr=const
+ can be rewritten as:
+ WHERE const=value AND expr=const
+
+ @param target - the target operator whose "expr" argument will be
+ replaced with "const".
+ @param target_expr - the target's "expr" which will be replaced with "const".
+ @param target_value - the target's second argument, it will remain unchanged.
+ @param source - the equality expression ("=" or "<=>") that
+ can be used to rewrite the "target" part
+ (under certain conditions, see the code).
+ @param source_expr - the source's "expr". It should be exactly equal to
+ the target's "expr" to make condition rewrite possible.
+ @param source_const - the source's "const" argument, it will be inserted
+ into "target" instead of "expr".
+*/
+static bool
+can_change_cond_ref_to_const(Item_bool_func2 *target,
+ Item *target_expr, Item *target_value,
+ Item_bool_func2 *source,
+ Item *source_expr, Item *source_const)
+{
+ if (!target_expr->eq(source_expr,0) ||
+ target_value == source_const ||
+ target_expr->cmp_context != source_expr->cmp_context)
+ return false;
+ if (target_expr->cmp_context == STRING_RESULT)
+ {
+ /*
+ In this example:
+ SET NAMES utf8 COLLATE utf8_german2_ci;
+ DROP TABLE IF EXISTS t1;
+ CREATE TABLE t1 (a CHAR(10) CHARACTER SET utf8);
+ INSERT INTO t1 VALUES ('o-umlaut'),('oe');
+ SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci AND a='oe';
+
+ the query should return only the row with 'oe'.
+ It should not return 'o-umlaut', because 'o-umlaut' does not match
+ the right part of the condition: a='oe'
+ ('o-umlaut' is not equal to 'oe' in utf8_general_ci,
+ which is the collation of the field "a").
+
+ If we change the right part from:
+ ... AND a='oe'
+ to
+ ... AND 'oe' COLLATE utf8_german2_ci='oe'
+ it will be evaluated to TRUE and removed from the condition,
+ so the overall query will be simplified to:
+
+ SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci;
+
+ which will erroneously start to return both 'oe' and 'o-umlaut'.
+ So changing "expr" to "const" is not possible if the effective
+ collations of "target" and "source" are not exactly the same.
+
+ Note, the code before the fix for MDEV-7152 only checked that
+ collations of "source_const" and "target_value" are the same.
+ This was not enough, as the bug report demonstrated.
+ */
+ return
+ target->compare_collation() == source->compare_collation() &&
+ target_value->collation.collation == source_const->collation.collation;
+ }
+ return true; // Non-string comparison
+}
+
+
/*
change field = field to field = const for each found field = const in the
and_level
@@ -13611,6 +13682,7 @@ static void update_const_equal_items(COND *cond, JOIN_TAB *tab, bool const_key)
static void
change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
Item *and_father, Item *cond,
+ Item_bool_func2 *field_value_owner,
Item *field, Item *value)
{
if (cond->type() == Item::COND_ITEM)
@@ -13621,7 +13693,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
Item *item;
while ((item=li++))
change_cond_ref_to_const(thd, save_list,and_level ? cond : item, item,
- field, value);
+ field_value_owner, field, value);
return;
}
if (cond->eq_cmp_result() == Item::COND_OK)
@@ -13633,11 +13705,8 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
Item *right_item= args[1];
Item_func::Functype functype= func->functype();
- if (right_item->eq(field,0) && left_item != value &&
- right_item->cmp_context == field->cmp_context &&
- (left_item->result_type() != STRING_RESULT ||
- value->result_type() != STRING_RESULT ||
- left_item->collation.collation == value->collation.collation))
+ if (can_change_cond_ref_to_const(func, right_item, left_item,
+ field_value_owner, field, value))
{
Item *tmp=value->clone_item();
if (tmp)
@@ -13656,11 +13725,8 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
func->set_cmp_func();
}
}
- else if (left_item->eq(field,0) && right_item != value &&
- left_item->cmp_context == field->cmp_context &&
- (right_item->result_type() != STRING_RESULT ||
- value->result_type() != STRING_RESULT ||
- right_item->collation.collation == value->collation.collation))
+ else if (can_change_cond_ref_to_const(func, left_item, right_item,
+ field_value_owner, field, value))
{
Item *tmp= value->clone_item();
if (tmp)
@@ -13709,7 +13775,8 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list,
Item **args= cond_cmp->cmp_func->arguments();
if (!args[0]->const_item())
change_cond_ref_to_const(thd, &save,cond_cmp->and_level,
- cond_cmp->and_level, args[0], args[1]);
+ cond_cmp->and_level,
+ cond_cmp->cmp_func, args[0], args[1]);
}
}
}
@@ -13731,14 +13798,14 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list,
resolve_const_item(thd, &args[1], args[0]);
func->update_used_tables();
change_cond_ref_to_const(thd, save_list, and_father, and_father,
- args[0], args[1]);
+ func, args[0], args[1]);
}
else if (left_const)
{
resolve_const_item(thd, &args[0], args[1]);
func->update_used_tables();
change_cond_ref_to_const(thd, save_list, and_father, and_father,
- args[1], args[0]);
+ func, args[1], args[0]);
}
}
}
@@ -18835,7 +18902,8 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
records are read. Because of optimization in some cases it can
provide only select_limit_cnt+1 records.
*/
- if (join->order && join->filesort_found_rows &&
+ if (join->order && join->sortorder &&
+ join->filesort_found_rows &&
join->select_options & OPTION_FOUND_ROWS)
{
DBUG_PRINT("info", ("filesort NESTED_LOOP_QUERY_LIMIT"));
@@ -18857,8 +18925,9 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
/* Join over all rows in table; Return number of found rows */
TABLE *table=jt->table;
- join->select_options ^= OPTION_FOUND_ROWS;
- if (join->filesort_found_rows)
+ join->select_options ^= OPTION_FOUND_ROWS;
+ if (table->sort.record_pointers ||
+ (table->sort.io_cache && my_b_inited(table->sort.io_cache)))
{
/* Using filesort */
join->send_records= table->sort.found_records;
@@ -20689,11 +20758,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
select, filesort_limit, 0,
&examined_rows, &found_rows);
table->sort.found_records= filesort_retval;
- if (found_rows != HA_POS_ERROR)
- {
- tab->records= found_rows; // For SQL_CALC_ROWS
- join->filesort_found_rows= true;
- }
+ tab->records= found_rows; // For SQL_CALC_ROWS
if (quick_created)
{
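
Editorial note: for the string case, the MDEV-7152 fix above only allows the expr-to-const substitution when both the comparison collations of the two equalities and the collations of the two constant sides agree. A toy sketch of that predicate, using plain collation names instead of Item objects:

#include <string>

// Hypothetical, flattened view of the pieces compared by
// can_change_cond_ref_to_const() for the STRING_RESULT case.
struct str_cmp
{
  std::string compare_collation;  // effective collation of the equality
  std::string operand_collation;  // collation of the value / const operand
};

static bool substitution_is_safe(const str_cmp &target, const str_cmp &source)
{
  // Both the comparison collations and the operand collations must match,
  // otherwise rewriting "expr" to "const" can change which rows qualify.
  return target.compare_collation == source.compare_collation &&
         target.operand_collation == source.operand_collation;
}
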
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 490d8c91a9e..7d53731b558 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1341,7 +1341,6 @@ public:
emb_sjm_nest= NULL;
sjm_lookup_tables= 0;
- filesort_found_rows= false;
exec_saved_explain= false;
/*
The following is needed because JOIN::cleanup(true) may be called for
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index c6f18fa2a3c..841f67239b4 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -4312,7 +4312,7 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys,
Again we don't do this for SHOW COLUMNS/KEYS because
of backward compatibility.
*/
- if (!is_show_fields_or_keys && result && thd->is_error() &&
+ if (!is_show_fields_or_keys && result &&
(thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE ||
thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT))
{
@@ -5319,12 +5319,11 @@ err:
column with the error text, and clear the error so that the operation
can continue.
*/
- const char *error= thd->is_error() ? thd->get_stmt_da()->message() : "";
+ const char *error= thd->get_stmt_da()->message();
table->field[20]->store(error, strlen(error), cs);
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- thd->get_stmt_da()->sql_errno(),
- thd->get_stmt_da()->message());
+ thd->get_stmt_da()->sql_errno(), error);
thd->clear_error();
}
@@ -8118,12 +8117,13 @@ bool get_schema_tables_result(JOIN *join,
THD *thd= join->thd;
LEX *lex= thd->lex;
bool result= 0;
- const char *old_proc_info;
+ PSI_stage_info org_stage;
DBUG_ENTER("get_schema_tables_result");
Warnings_only_error_handler err_handler;
thd->push_internal_handler(&err_handler);
- old_proc_info= thd_proc_info(thd, "Filling schema table");
+ thd->enter_stage(&stage_filling_schema_table, &org_stage, __func__, __FILE__,
+ __LINE__);
JOIN_TAB *tab;
for (tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, WITH_CONST_TABLES);
@@ -8227,7 +8227,7 @@ bool get_schema_tables_result(JOIN *join,
}
else if (result)
my_error(ER_UNKNOWN_ERROR, MYF(0));
- thd_proc_info(thd, old_proc_info);
+ THD_STAGE_INFO(thd, org_stage);
DBUG_RETURN(result);
}
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index d368145ca73..4ce1f3ec22a 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -2355,9 +2355,15 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
int rc= 0;
KEY *key_info= &table->key_info[index];
ha_rows rows= 0;
- Index_prefix_calc index_prefix_calc(table, key_info);
+
DBUG_ENTER("collect_statistics_for_index");
+ /* No statistics for FULLTEXT indexes. */
+ if (key_info->flags & HA_FULLTEXT)
+ DBUG_RETURN(rc);
+
+ Index_prefix_calc index_prefix_calc(table, key_info);
+
DEBUG_SYNC(table->in_use, "statistics_collection_start1");
DEBUG_SYNC(table->in_use, "statistics_collection_start2");
@@ -2391,7 +2397,7 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
if (!rc)
index_prefix_calc.get_avg_frequency();
- DBUG_RETURN(rc);
+ DBUG_RETURN(rc);
}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 34515d655e5..cf3f588b323 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -8450,9 +8450,21 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
/*
- If this is an ALTER TABLE and no explicit row type specified reuse
- the table's row type.
- Note : this is the same as if the row type was specified explicitly.
+ If a foreign key is added, check the permission to access the parent table.
+
+ In function "check_fk_parent_table_access", create_info->db_type is used
+ to identify whether the engine supports FK constraints or not. Since
+ create_info->db_type is set here, the check for parent table access is
+ delayed until this point for the alter operation.
+ */
+ if ((alter_info->flags & Alter_info::ADD_FOREIGN_KEY) &&
+ check_fk_parent_table_access(thd, create_info, alter_info))
+ DBUG_RETURN(true);
+
+ /*
+ If this is an ALTER TABLE and no explicit row type is specified, reuse
+ the table's row type.
+ Note: this is the same as if the row type was specified explicitly.
*/
if (create_info->row_type == ROW_TYPE_NOT_USED)
{
@@ -9595,12 +9607,12 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
/*
- Recreates tables by calling mysql_alter_table().
+ Recreates one table by calling mysql_alter_table().
SYNOPSIS
mysql_recreate_table()
thd Thread handler
- tables Tables to recreate
+ table_list Table to recreate
table_copy Recreate the table by using
ALTER TABLE COPY algorithm
@@ -9612,13 +9624,15 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy)
{
HA_CREATE_INFO create_info;
Alter_info alter_info;
- DBUG_ENTER("mysql_recreate_table");
- DBUG_ASSERT(!table_list->next_global);
+ TABLE_LIST *next_table= table_list->next_global;
+ DBUG_ENTER("mysql_recreate_table");
/* Set lock type which is appropriate for ALTER TABLE. */
table_list->lock_type= TL_READ_NO_INSERT;
/* Same applies to MDL request. */
table_list->mdl_request.set_type(MDL_SHARED_NO_WRITE);
+ /* Hide the following tables from open_tables(). */
+ table_list->next_global= NULL;
bzero((char*) &create_info, sizeof(create_info));
create_info.row_type=ROW_TYPE_NOT_USED;
@@ -9630,9 +9644,11 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy)
if (table_copy)
alter_info.requested_algorithm= Alter_info::ALTER_TABLE_ALGORITHM_COPY;
- DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
+ bool res= mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, &alter_info, 0,
- (ORDER *) 0, 0));
+ (ORDER *) 0, 0);
+ table_list->next_global= next_table;
+ DBUG_RETURN(res);
}
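
Editorial note: mysql_recreate_table() now handles being called with a longer table list by temporarily detaching next_global, running the ALTER on the single table, and reattaching the tail before returning. A generic sketch of that detach/restore pattern on a hypothetical singly linked node type:

// Hypothetical singly linked list node standing in for TABLE_LIST.
struct node
{
  node *next_global;
};

// Run op() on a single element while hiding the rest of the list,
// then restore the original chain, as mysql_recreate_table() now does.
template <typename Op>
static bool with_detached_tail(node *head, Op op)
{
  node *saved_tail= head->next_global;
  head->next_global= nullptr;      // hide following tables
  bool res= op(head);
  head->next_global= saved_tail;   // reattach before returning
  return res;
}
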
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index c9aafbf622c..b86516cbd9c 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -603,7 +603,10 @@ int mysql_update(THD *thd,
if (query_plan.index == MAX_KEY || (select && select->quick))
{
if (init_read_record(&info, thd, table, select, 0, 1, FALSE))
+ {
+ close_cached_file(&tempfile);
goto err;
+ }
}
else
init_read_record_idx(&info, thd, table, 1, query_plan.index, reverse);
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 6a81301a6d9..20a16e3eae1 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -685,7 +685,7 @@ err:
/* number of required parameters for making view */
-static const int required_view_parameters= 14;
+static const int required_view_parameters= 15;
/*
table of VIEW .frm field descriptors
@@ -736,6 +736,9 @@ static File_option view_parameters[]=
{{(char*) STRING_WITH_LEN("view_body_utf8")},
my_offsetof(TABLE_LIST, view_body_utf8),
FILE_OPTIONS_ESTRING},
+ {{ C_STRING_WITH_LEN("mariadb-version")},
+ my_offsetof(TABLE_LIST, mariadb_version),
+ FILE_OPTIONS_ULONGLONG},
{{NullS, 0}, 0,
FILE_OPTIONS_STRING}
};
@@ -836,6 +839,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
version 2 - empty definer_host means a role
*/
view->file_version= 2;
+ view->mariadb_version= MYSQL_VERSION_ID;
view->calc_md5(md5);
if (!(view->md5.str= (char*) thd->memdup(md5, 32)))
{
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index dbbf16e07c1..561e3afcd54 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -3457,13 +3457,13 @@ static Sys_var_bit Sys_log_off(
static bool fix_sql_log_bin_after_update(sys_var *self, THD *thd,
enum_var_type type)
{
- if (type == OPT_SESSION)
- {
- if (thd->variables.sql_log_bin)
- thd->variables.option_bits |= OPTION_BIN_LOG;
- else
- thd->variables.option_bits &= ~OPTION_BIN_LOG;
- }
+ DBUG_ASSERT(type == OPT_SESSION);
+
+ if (thd->variables.sql_log_bin)
+ thd->variables.option_bits |= OPTION_BIN_LOG;
+ else
+ thd->variables.option_bits &= ~OPTION_BIN_LOG;
+
return FALSE;
}
@@ -3485,7 +3485,10 @@ static bool check_sql_log_bin(sys_var *self, THD *thd, set_var *var)
return TRUE;
if (var->type == OPT_GLOBAL)
- return FALSE;
+ {
+ my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), self->name.str, "SESSION");
+ return TRUE;
+ }
if (error_if_in_trans_or_substatement(thd,
ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN,
@@ -3496,9 +3499,9 @@ static bool check_sql_log_bin(sys_var *self, THD *thd, set_var *var)
}
static Sys_var_mybool Sys_log_binlog(
- "sql_log_bin", "sql_log_bin",
- SESSION_VAR(sql_log_bin), NO_CMD_LINE,
- DEFAULT(TRUE), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_sql_log_bin),
+ "sql_log_bin", "Controls whether logging to the binary log is done",
+ SESSION_VAR(sql_log_bin), NO_CMD_LINE, DEFAULT(TRUE),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_sql_log_bin),
ON_UPDATE(fix_sql_log_bin_after_update));
static Sys_var_bit Sys_sql_warnings(
@@ -4769,7 +4772,7 @@ static Sys_var_ulong Sys_sp_cache_size(
"The soft upper limit for number of cached stored routines for "
"one connection.",
GLOBAL_VAR(stored_program_cache_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(256, 512 * 1024), DEFAULT(256), BLOCK_SIZE(1));
+ VALID_RANGE(0, 512 * 1024), DEFAULT(256), BLOCK_SIZE(1));
export const char *plugin_maturity_names[]=
{ "unknown", "experimental", "alpha", "beta", "gamma", "stable", 0 };
diff --git a/sql/table.cc b/sql/table.cc
index 69b0faf0a9e..5b8809bbced 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -831,6 +831,24 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
}
+/** Ensures that the enum value (read from frm) is within limits.
+
+ If not, issues a warning and resets the value to 0
+ (that is, 0 is assumed to be the default value).
+*/
+
+static uint enum_value_with_check(THD *thd, TABLE_SHARE *share,
+ const char *name, uint value, uint limit)
+{
+ if (value < limit)
+ return value;
+
+ sql_print_warning("%s.frm: invalid value %d for the field %s",
+ share->normalized_path.str, value, name);
+ return 0;
+}
+
+
/**
Check if a collation has changed number
@@ -840,8 +858,7 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
@retval new collation number (same as current collation number if no change)
*/
-static uint
-upgrade_collation(ulong mysql_version, uint cs_number)
+static uint upgrade_collation(ulong mysql_version, uint cs_number)
{
if (mysql_version >= 50300 && mysql_version <= 50399)
{
@@ -865,8 +882,6 @@ upgrade_collation(ulong mysql_version, uint cs_number)
}
-
-
/**
Read data from a binary .frm file image into a TABLE_SHARE
@@ -1050,9 +1065,12 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->incompatible_version|= HA_CREATE_USED_CHARSET;
share->avg_row_length= uint4korr(frm_image+34);
- share->transactional= (ha_choice) (frm_image[39] & 3);
- share->page_checksum= (ha_choice) ((frm_image[39] >> 2) & 3);
- share->row_type= (enum row_type) frm_image[40];
+ share->transactional= (ha_choice)
+ enum_value_with_check(thd, share, "transactional", frm_image[39] & 3, HA_CHOICE_MAX);
+ share->page_checksum= (ha_choice)
+ enum_value_with_check(thd, share, "page_checksum", (frm_image[39] >> 2) & 3, HA_CHOICE_MAX);
+ share->row_type= (enum row_type)
+ enum_value_with_check(thd, share, "row_format", frm_image[40], ROW_TYPE_MAX);
if (cs_new && !(share->table_charset= get_charset(cs_new, MYF(MY_WME))))
goto err;
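
Editorial note: the new enum_value_with_check() treats the bytes read from the .frm image as untrusted; any value outside the known enum range is reported and replaced by 0 (the default). A standalone sketch of the same guard, with fprintf standing in for sql_print_warning():

#include <cstdio>

// Reject out-of-range enum codes read from an untrusted on-disk image,
// falling back to 0 which is taken to be the default value.
static unsigned checked_enum(const char *path, const char *field,
                             unsigned value, unsigned limit)
{
  if (value < limit)
    return value;

  // Stand-in for sql_print_warning() in the real server.
  fprintf(stderr, "%s.frm: invalid value %u for the field %s\n",
          path, value, field);
  return 0;
}
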
diff --git a/sql/table.h b/sql/table.h
index 58b78af6836..b978484158b 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1898,6 +1898,7 @@ struct TABLE_LIST
LEX_STRING timestamp; /* GMT time stamp of last operation */
st_lex_user definer; /* definer of view */
ulonglong file_version; /* version of file's field set */
+ ulonglong mariadb_version; /* version of server on creation */
ulonglong updatable_view; /* VIEW can be updated */
/**
@brief The declared algorithm, if this is a view.
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index 40b2a84d87d..956372e4960 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -21,18 +21,18 @@ ha_connect.cc connect.cc user_connect.cc mycat.cc
fmdlex.c osutil.c plugutil.c rcmsg.c rcmsg.h
array.cpp blkfil.cpp colblk.cpp csort.cpp
filamap.cpp filamdbf.cpp filamfix.cpp filamtxt.cpp filamvct.cpp filamzip.cpp
-filter.cpp maputil.cpp myutil.cpp plgdbutl.cpp reldef.cpp tabcol.cpp
-tabdos.cpp tabfix.cpp tabfmt.cpp table.cpp tabmul.cpp taboccur.cpp
+filter.cpp json.cpp maputil.cpp myutil.cpp plgdbutl.cpp reldef.cpp tabcol.cpp
+tabdos.cpp tabfix.cpp tabfmt.cpp tabjson.cpp table.cpp tabmul.cpp taboccur.cpp
tabpivot.cpp tabsys.cpp tabtbl.cpp tabutil.cpp tabvct.cpp tabvir.cpp
tabxcl.cpp valblk.cpp value.cpp xindex.cpp xobject.cpp
array.h blkfil.h block.h catalog.h checklvl.h colblk.h connect.h csort.h
engmsg.h filamap.h filamdbf.h filamfix.h filamtxt.h filamvct.h filamzip.h
-filter.h global.h ha_connect.h inihandl.h maputil.h msgid.h mycat.h myutil.h
-os.h osutil.h plgcnx.h plgdbsem.h preparse.h reldef.h resource.h tabcol.h
-tabdos.h tabfix.h tabfmt.h tabmul.h taboccur.h tabpivot.h tabsys.h
-tabtbl.h tabutil.h tabvct.h tabvir.h tabxcl.h user_connect.h valblk.h value.h
-xindex.h xobject.h xtable.h)
+filter.h global.h ha_connect.h inihandl.h json.h maputil.h msgid.h mycat.h
+myutil.h os.h osutil.h plgcnx.h plgdbsem.h preparse.h reldef.h resource.h
+tabcol.h tabdos.h tabfix.h tabfmt.h tabjson.h tabmul.h taboccur.h tabpivot.h
+tabsys.h tabtbl.h tabutil.h tabvct.h tabvir.h tabxcl.h user_connect.h
+valblk.h value.h xindex.h xobject.h xtable.h)
#
# Definitions that are shared for all OSes
diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc
index 87c782ba953..a54d8ebcc44 100644
--- a/storage/connect/connect.cc
+++ b/storage/connect/connect.cc
@@ -52,7 +52,7 @@
/* Routines called internally by semantic routines. */
/***********************************************************************/
void CntEndDB(PGLOBAL);
-RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool mrr= false);
+RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool reset, bool mrr= false);
/***********************************************************************/
/* MySQL routines called externally by semantic routines. */
@@ -388,7 +388,7 @@ bool CntRewindTable(PGLOBAL g, PTDB tdbp)
/***********************************************************************/
/* Evaluate all columns after a record is read. */
/***********************************************************************/
-RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool mrr)
+RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool reset, bool mrr)
{
RCODE rc= RC_OK;
PCOL colp;
@@ -413,7 +413,8 @@ RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool mrr)
for (colp= tdbp->GetColumns(); rc == RC_OK && colp;
colp= colp->GetNext()) {
- colp->Reset();
+ if (reset)
+ colp->Reset();
// Virtual columns are computed by MariaDB
if (!colp->GetColUse(U_VIRTUAL) && (!mrr || colp->GetKcol()))
@@ -457,6 +458,10 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp)
goto err;
} // endif rc
+ // Do it now to avoid double eval when filtering
+ for (PCOL colp= tdbp->GetColumns(); colp; colp= colp->GetNext())
+ colp->Reset();
+
do {
if ((rc= (RCODE)tdbp->ReadDB(g)) == RC_OK)
if (!ApplyFilter(g, tdbp->GetFilter()))
@@ -466,7 +471,7 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp)
err:
g->jump_level--;
- return (rc != RC_OK) ? rc : EvalColumns(g, tdbp);
+ return (rc != RC_OK) ? rc : EvalColumns(g, tdbp, false);
} // end of CntReadNext
/***********************************************************************/
@@ -812,7 +817,7 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op,
rnd:
if ((rc= (RCODE)ptdb->ReadDB(g)) == RC_OK)
- rc= EvalColumns(g, ptdb, mrr);
+ rc= EvalColumns(g, ptdb, true, mrr);
return rc;
} // end of CntIndexRead
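
Editorial note: the reset parameter added to EvalColumns() lets CntReadNext() reset the column buffers once, before looping over rows that the pushed-down filter may reject, and then evaluate the accepted row without a second reset. A schematic sketch of that control flow with hypothetical read_row/apply_filter/eval_columns helpers:

// Schematic of the CntReadNext() change: reset once, filter, then
// evaluate the accepted row without resetting again.
template <typename Table>
static int read_next(Table &tdb)
{
  tdb.reset_columns();              // done once, up front

  int rc;
  do
  {
    rc= tdb.read_row();
  } while (rc == 0 && !tdb.apply_filter());

  if (rc != 0)
    return rc;
  return tdb.eval_columns(/* reset= */ false);
}
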
diff --git a/storage/connect/engmsg.h b/storage/connect/engmsg.h
index ad6dc6b5689..14808758efd 100644
--- a/storage/connect/engmsg.h
+++ b/storage/connect/engmsg.h
@@ -103,6 +103,10 @@
#define MSG_FILE_MAP_ERROR "CreateFileMapping %s error rc=%d"
#define MSG_FILE_OPEN_YET "File %s already open"
#define MSG_FILE_UNFOUND "File %s not found"
+#define MSG_FIX_OVFLW_ADD "Fixed Overflow on add"
+#define MSG_FIX_OVFLW_TIMES "Fixed Overflow on times"
+#define MSG_FIX_UNFLW_ADD "Fixed Underflow on add"
+#define MSG_FIX_UNFLW_TIMES "Fixed Underflow on times"
#define MSG_FLD_TOO_LNG_FOR "Field %d too long for %s line %d of %s"
#define MSG_FLT_BAD_RESULT "Float inexact result"
#define MSG_FLT_DENORMAL_OP "Float denormal operand"
@@ -318,3 +322,4 @@
#define MSG_XPATH_CNTX_ERR "Unable to create new XPath context"
#define MSG_XPATH_EVAL_ERR "Unable to evaluate xpath location '%s'"
#define MSG_XPATH_NOT_SUPP "Unsupported Xpath for column %s"
+#define MSG_ZERO_DIVIDE "Zero divide in expression"
diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h
index b89d58965f9..864ca66dd34 100644
--- a/storage/connect/filamtxt.h
+++ b/storage/connect/filamtxt.h
@@ -26,6 +26,7 @@ class DllExport TXTFAM : public BLOCK {
friend class TDBCSV;
friend class TDBFIX;
friend class TDBVCT;
+ friend class TDBJSON;
friend class DOSCOL;
friend class BINCOL;
friend class VCTCOL;
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index ebf286a049b..a0ac4668eba 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) Olivier Bertrand 2004 - 2014
+/* Copyright (C) Olivier Bertrand 2004 - 2015
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -170,8 +170,8 @@
#define SZWMIN 4194304 // Minimum work area size 4M
extern "C" {
- char version[]= "Version 1.03.0005 November 08, 2014";
- char compver[]= "Version 1.03.0005 " __DATE__ " " __TIME__;
+ char version[]= "Version 1.03.0006 January 13, 2015";
+ char compver[]= "Version 1.03.0006 " __DATE__ " " __TIME__;
#if defined(WIN32)
char slash= '\\';
@@ -714,7 +714,7 @@ ha_connect::ha_connect(handlerton *hton, TABLE_SHARE *table_arg)
datapath= "./";
#endif // !WIN32
tdbp= NULL;
- sdvalin= NULL;
+ sdvalin1= sdvalin2= sdvalin3= sdvalin4= NULL;
sdvalout= NULL;
xmod= MODE_ANY;
istable= false;
@@ -1055,6 +1055,14 @@ char *ha_connect::GetStringOption(char *opname, char *sdef)
opval= (char*)options->colist;
else if (!stricmp(opname, "Data_charset"))
opval= (char*)options->data_charset;
+ else if (!stricmp(opname, "Table_charset")) {
+ const CHARSET_INFO *chif= (tshp) ? tshp->table_charset
+ : table->s->table_charset;
+
+ if (chif)
+ opval= (char*)chif->csname;
+
+ } // endif Table_charset
if (!opval && options && options->oplist)
opval= GetListOption(xp->g, opname, options->oplist);
@@ -1326,6 +1334,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
datm.tm_mday= 12;
datm.tm_mon= 11;
datm.tm_year= 112;
+ mktime(&datm); // set other fields to get a proper day name
len= strftime(buf, 256, pdtp->OutFmt, &datm);
} else
len= 0;
@@ -1806,7 +1815,7 @@ int ha_connect::CloseTable(PGLOBAL g)
{
int rc= CntCloseTable(g, tdbp, nox, abort);
tdbp= NULL;
- sdvalin=NULL;
+ sdvalin1= sdvalin2= sdvalin3= sdvalin4= NULL;
sdvalout=NULL;
valid_info= false;
indexing= -1;
@@ -1960,7 +1969,7 @@ int ha_connect::ScanRecord(PGLOBAL g, uchar *buf)
char *fmt;
int rc= 0;
PCOL colp;
- PVAL value;
+ PVAL value, sdvalin;
Field *fp;
PTDBASE tp= (PTDBASE)tdbp;
String attribute(attr_buffer, sizeof(attr_buffer),
@@ -2003,25 +2012,45 @@ int ha_connect::ScanRecord(PGLOBAL g, uchar *buf)
value->SetValue(fp->val_real());
break;
case TYPE_DATE:
- if (!sdvalin)
- sdvalin= (DTVAL*)AllocateValue(xp->g, TYPE_DATE, 19);
-
// Get date in the format produced by MySQL fields
switch (fp->type()) {
case MYSQL_TYPE_DATE:
- fmt= "YYYY-MM-DD";
+ if (!sdvalin2) {
+ sdvalin2= (DTVAL*)AllocateValue(xp->g, TYPE_DATE, 19);
+ fmt= "YYYY-MM-DD";
+ ((DTVAL*)sdvalin2)->SetFormat(g, fmt, strlen(fmt));
+ } // endif sdvalin2
+
+ sdvalin= sdvalin2;
break;
case MYSQL_TYPE_TIME:
- fmt= "hh:mm:ss";
+ if (!sdvalin3) {
+ sdvalin3= (DTVAL*)AllocateValue(xp->g, TYPE_DATE, 19);
+ fmt= "hh:mm:ss";
+ ((DTVAL*)sdvalin3)->SetFormat(g, fmt, strlen(fmt));
+ } // endif sdvalin3
+
+ sdvalin= sdvalin3;
break;
case MYSQL_TYPE_YEAR:
- fmt= "YYYY";
+ if (!sdvalin4) {
+ sdvalin4= (DTVAL*)AllocateValue(xp->g, TYPE_DATE, 19);
+ fmt= "YYYY";
+ ((DTVAL*)sdvalin4)->SetFormat(g, fmt, strlen(fmt));
+ } // endif sdvalin4
+
+ sdvalin= sdvalin4;
break;
default:
- fmt= "YYYY-MM-DD hh:mm:ss";
+ if (!sdvalin1) {
+ sdvalin1= (DTVAL*)AllocateValue(xp->g, TYPE_DATE, 19);
+ fmt= "YYYY-MM-DD hh:mm:ss";
+ ((DTVAL*)sdvalin1)->SetFormat(g, fmt, strlen(fmt));
+ } // endif sdvalin1
+
+ sdvalin= sdvalin1;
} // endswitch type
- ((DTVAL*)sdvalin)->SetFormat(g, fmt, strlen(fmt));
sdvalin->SetNullable(colp->IsNullable());
fp->val_str(&attribute);
sdvalin->SetValue_psz(attribute.c_ptr_safe());
@@ -3805,6 +3834,7 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn)
case TAB_XML:
case TAB_INI:
case TAB_VEC:
+ case TAB_JSON:
if (options->filename && *options->filename) {
char *s, path[FN_REFLEN], dbpath[FN_REFLEN];
#if defined(WIN32)
@@ -4800,6 +4830,9 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd,
#endif // WIN32
int port= 0, hdr= 0, mxr __attribute__((unused))= 0, mxe= 0, rc= 0;
int cop __attribute__((unused)) = 0;
+#if defined(ODBC_SUPPORT)
+ int cto= -1, qto= -1;
+#endif // ODBC_SUPPORT
uint tm, fnc= FNC_NO, supfnc= (FNC_NO | FNC_COL);
bool bif, ok= false, dbf= false;
TABTYPE ttp= TAB_UNDEF;
@@ -4834,7 +4867,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd,
fncn= topt->catfunc;
fnc= GetFuncID(fncn);
sep= topt->separator;
- spc= (!sep || !strcmp(sep, "\\t")) ? '\t' : *sep;
+ spc= (!sep) ? ',' : (!strcmp(sep, "\\t")) ? '\t' : *sep;
qch= topt->qchar ? *topt->qchar : (signed)topt->quoted >= 0 ? '"' : 0;
hdr= (int)topt->header;
tbl= topt->tablist;
@@ -4859,6 +4892,8 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd,
port= atoi(GetListOption(g, "port", topt->oplist, "0"));
#if defined(ODBC_SUPPORT)
mxr= atoi(GetListOption(g,"maxres", topt->oplist, "0"));
+ cto= atoi(GetListOption(g,"ConnectTimeout", topt->oplist, "-1"));
+ qto= atoi(GetListOption(g,"QueryTimeout", topt->oplist, "-1"));
#endif
mxe= atoi(GetListOption(g,"maxerr", topt->oplist, "0"));
#if defined(PROMPT_OK)
@@ -5077,14 +5112,15 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd,
case FNC_NO:
case FNC_COL:
if (src) {
- qrp= ODBCSrcCols(g, dsn, (char*)src);
+ qrp= ODBCSrcCols(g, dsn, (char*)src, cto, qto);
src= NULL; // for next tests
} else
- qrp= ODBCColumns(g, dsn, shm, tab, NULL, mxr, fnc == FNC_COL);
+ qrp= ODBCColumns(g, dsn, shm, tab, NULL,
+ mxr, cto, qto, fnc == FNC_COL);
break;
case FNC_TABLE:
- qrp= ODBCTables(g, dsn, shm, tab, mxr, true);
+ qrp= ODBCTables(g, dsn, shm, tab, mxr, cto, qto, true);
break;
case FNC_DSN:
qrp= ODBCDataSources(g, mxr, true);
@@ -5190,6 +5226,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd,
tm= NOT_NULL_FLAG;
cnm= (char*)"noname";
dft= xtra= key= NULL;
+ v= ' ';
#if defined(NEW_WAY)
rem= "";
// cs= NULL;
@@ -5200,7 +5237,13 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd,
for (crp= qrp->Colresp; crp; crp= crp->Next)
switch (crp->Fld) {
case FLD_NAME:
- cnm= encode(g, crp->Kdata->GetCharValue(i));
+ if (ttp == TAB_CSV && topt->data_charset &&
+ (!stricmp(topt->data_charset, "UTF8") ||
+ !stricmp(topt->data_charset, "UTF-8")))
+ cnm= crp->Kdata->GetCharValue(i);
+ else
+ cnm= encode(g, crp->Kdata->GetCharValue(i));
+
break;
case FLD_TYPE:
typ= crp->Kdata->GetIntValue(i);
diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h
index 6c3ed87d5f6..922a69a3991 100644
--- a/storage/connect/ha_connect.h
+++ b/storage/connect/ha_connect.h
@@ -543,7 +543,10 @@ protected:
query_id_t creat_query_id; // The one when handler was allocated
char *datapath; // Is the Path of DB data directory
PTDB tdbp; // To table class object
- PVAL sdvalin; // Used to convert date values
+ PVAL sdvalin1; // Used to convert date values
+ PVAL sdvalin2; // Used to convert date values
+ PVAL sdvalin3; // Used to convert date values
+ PVAL sdvalin4; // Used to convert date values
PVAL sdvalout; // Used to convert date values
bool istable; // True for table handler
char partname[64]; // The partition name
diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp
new file mode 100644
index 00000000000..983f45d9cee
--- /dev/null
+++ b/storage/connect/json.cpp
@@ -0,0 +1,1055 @@
+/*************** json CPP Declares Source Code File (.CPP) **************/
+/* Name: json.cpp Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2015 */
+/* */
+/* This file contains the JSON classes functions. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant sections of the MariaDB header file. */
+/***********************************************************************/
+#include <my_global.h>
+
+/***********************************************************************/
+/* Include application header files: */
+/* global.h is the header containing all global declarations. */
+/* plgdbsem.h is the header containing the DB application declarations. */
+/* json.h is the header containing the JSON classes declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "json.h"
+
+#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0)
+
+#if defined(WIN32)
+#define EL "\r\n"
+#else
+#define EL "\n"
+#endif
+
+/***********************************************************************/
+/* Parse a json string. */
+/***********************************************************************/
+PJSON ParseJson(PGLOBAL g, char *s, int len, int pretty, bool *comma)
+{
+ int i;
+ bool b = false;
+ PJSON jsp = NULL;
+ STRG src;
+
+ if (!s || !len) {
+ strcpy(g->Message, "Void JSON object");
+ return NULL;
+ } else if (comma)
+ *comma = false;
+
+ src.str = s;
+ src.len = len;
+
+ for (i = 0; i < len; i++)
+ switch (s[i]) {
+ case '[':
+ if (jsp) {
+ strcpy(g->Message, "More than one item in file");
+ return NULL;
+ } else if (!(jsp = ParseArray(g, ++i, src)))
+ return NULL;
+
+ break;
+ case '{':
+ if (jsp) {
+ strcpy(g->Message, "More than one item in file");
+ return NULL;
+ } else if (!(jsp = ParseObject(g, ++i, src)))
+ return NULL;
+ break;
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ case ',':
+ if (jsp && pretty == 1) {
+ if (comma)
+ *comma = true;
+
+ break;
+ } // endif pretty
+
+ sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
+ return NULL;
+ case '(':
+ b = true;
+ break;
+ case ')':
+ if (b) {
+ b = false;
+ break;
+ } // endif b
+
+ default:
+ sprintf(g->Message, "Bad '%c' character near %.*s",
+ s[i], ARGS);
+ return NULL;
+ }; // endswitch s[i]
+
+ if (!jsp)
+ sprintf(g->Message, "Invalid Json string '%.*s'", 50, s);
+
+ return jsp;
+} // end of ParseJson
+
+/***********************************************************************/
+/* Parse a JSON Array. */
+/***********************************************************************/
+PJAR ParseArray(PGLOBAL g, int& i, STRG& src)
+{
+ char *s = src.str;
+ int len = src.len;
+ int level = 0;
+ PJAR jarp = new(g) JARRAY;
+ PJVAL jvp = NULL;
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case ',':
+ if (level < 2) {
+ sprintf(g->Message, "Unexpected ',' near %.*s",ARGS);
+ return NULL;
+ } else
+ level = 1;
+
+ break;
+ case ']':
+ if (level == 1) {
+ sprintf(g->Message, "Unexpected ',]' near %.*s", ARGS);
+ return NULL;
+ } // endif level
+
+ jarp->InitArray(g);
+ return jarp;
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ default:
+ if (level == 2) {
+ sprintf(g->Message, "Unexpected value near %.*s", ARGS);
+ return NULL;
+ } else if ((jvp = ParseValue(g, i, src))) {
+ jarp->AddValue(g, jvp);
+ level = 2;
+ } else
+ return NULL;
+
+ level = 2;
+ break;
+ }; // endswitch s[i]
+
+ strcpy(g->Message, "Unexpected EOF in array");
+ return NULL;
+} // end of ParseArray
+
+/***********************************************************************/
+/* Parse a JSON Object. */
+/***********************************************************************/
+PJOB ParseObject(PGLOBAL g, int& i, STRG& src)
+{
+ PSZ key;
+ char *s = src.str;
+ int len = src.len;
+ int level = 0;
+ PJOB jobp = new(g) JOBJECT;
+ PJPR jpp = NULL;
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '"':
+ if (level < 2) {
+ if ((key = ParseString(g, ++i, src))) {
+ jpp = jobp->AddPair(g, key);
+ level = 1;
+ } else
+ return NULL;
+
+ } else {
+ sprintf(g->Message, "misplaced string near %.*s", ARGS);
+ return NULL;
+ } // endif level
+
+ break;
+ case ':':
+ if (level == 1) {
+ if (!(jpp->Val = ParseValue(g, ++i, src)))
+ return NULL;
+
+ level = 2;
+ } else {
+ sprintf(g->Message, "Unexpected ':' near %.*s", ARGS);
+ return NULL;
+ } // endif level
+
+ break;
+ case ',':
+ if (level < 2) {
+ sprintf(g->Message, "Unexpected ',' near %.*s", ARGS);
+ return NULL;
+ } else
+ level = 1;
+
+ break;
+ case '}':
+ if (level == 1) {
+ sprintf(g->Message, "Unexpected '}' near %.*s", ARGS);
+ return NULL;
+ } // endif level
+
+ return jobp;
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ default:
+ sprintf(g->Message, "Unexpected character '%c' near %.*s",
+ s[i], ARGS);
+ return NULL;
+ }; // endswitch s[i]
+
+ strcpy(g->Message, "Unexpected EOF in Object");
+ return NULL;
+} // end of ParseObject
+
+/***********************************************************************/
+/* Parse a JSON Value. */
+/***********************************************************************/
+PJVAL ParseValue(PGLOBAL g, int& i, STRG& src)
+{
+ char *strval, *s = src.str;
+ int n, len = src.len;
+ PJVAL jvp = new(g) JVALUE;
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ default:
+ goto suite;
+ } // endswitch
+
+ suite:
+ switch (s[i]) {
+ case '[':
+ if (!(jvp->Jsp = ParseArray(g, ++i, src)))
+ return NULL;
+
+ break;
+ case '{':
+ if (!(jvp->Jsp = ParseObject(g, ++i, src)))
+ return NULL;
+
+ break;
+ case '"':
+ if ((strval = ParseString(g, ++i, src)))
+ jvp->Value = AllocateValue(g, strval, TYPE_STRING);
+ else
+ return NULL;
+
+ break;
+ case 't':
+ if (!strncmp(s + i, "true", 4)) {
+ n = 1;
+ jvp->Value = AllocateValue(g, &n, TYPE_TINY);
+ i += 3;
+ } else
+ goto err;
+
+ break;
+ case 'f':
+ if (!strncmp(s + i, "false", 5)) {
+ n = 0;
+ jvp->Value = AllocateValue(g, &n, TYPE_TINY);
+ i += 4;
+ } else
+ goto err;
+
+ break;
+ case 'n':
+ if (!strncmp(s + i, "null", 4))
+ i += 3;
+ else
+ goto err;
+
+ break;
+ case '-':
+ default:
+ if (s[i] == '-' || isdigit(s[i])) {
+ if (!(jvp->Value = ParseNumeric(g, i, src)))
+ goto err;
+
+ } else
+ goto err;
+
+ }; // endswitch s[i]
+
+ jvp->Size = 1;
+ return jvp;
+
+err:
+ sprintf(g->Message, "Unexpected character '%c' near %.*s",
+ s[i], ARGS);
+ return NULL;
+} // end of ParseValue
+
+/***********************************************************************/
+/* Unescape and parse a JSON string. */
+/***********************************************************************/
+char *ParseString(PGLOBAL g, int& i, STRG& src)
+{
+ char *p, *s = src.str;
+ int n = 0, len = src.len;
+
+ // The size to allocate is not known yet
+ p = (char*)PlugSubAlloc(g, NULL, 0);
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '"':
+ p[n++] = 0;
+ PlugSubAlloc(g, NULL, n);
+ return p;
+ case '\\':
+ if (++i < len) {
+ if (s[i] == 'u') {
+ if (len - i > 5) {
+// if (charset == utf8) {
+ char xs[5];
+ uint hex;
+
+ xs[0] = s[++i];
+ xs[1] = s[++i];
+ xs[2] = s[++i];
+ xs[3] = s[++i];
+ xs[4] = 0;
+ hex = strtoul(xs, NULL, 16);
+
+ if (hex < 0x80) {
+ p[n] = (uchar)hex;
+ } else if (hex < 0x800) {
+ p[n++] = (uchar)(0xC0 | (hex >> 6));
+ p[n] = (uchar)(0x80 | (hex & 0x3F));
+ } else if (hex < 0x10000) {
+ p[n++] = (uchar)(0xE0 | (hex >> 12));
+ p[n++] = (uchar)(0x80 | ((hex >> 6) & 0x3f));
+ p[n] = (uchar)(0x80 | (hex & 0x3f));
+ } else
+ p[n] = '?';
+
+#if 0
+ } else {
+ char xs[3];
+ UINT hex;
+
+ i += 2;
+ xs[0] = s[++i];
+ xs[1] = s[++i];
+ xs[2] = 0;
+ hex = strtoul(xs, NULL, 16);
+ p[n] = (char)hex;
+ } // endif charset
+#endif // 0
+ } else
+ goto err;
+
+ } else switch(s[i]) {
+ case 't': p[n] = '\t'; break;
+ case 'n': p[n] = '\n'; break;
+ case 'r': p[n] = '\r'; break;
+ case 'b': p[n] = '\b'; break;
+ case 'f': p[n] = '\f'; break;
+ default: p[n] = s[i]; break;
+ } // endswitch
+
+ n++;
+ } else
+ goto err;
+
+ break;
+ default:
+ p[n++] = s[i];
+ break;
+ }; // endswitch s[i]
+
+ err:
+ strcpy(g->Message, "Unexpected EOF in String");
+ return NULL;
+} // end of ParseString
+
+/***********************************************************************/
+/* Parse a JSON numeric value. */
+/***********************************************************************/
+PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src)
+{
+ char *s = src.str, buf[50];
+ int n = 0, len = src.len;
+ short nd = 0;
+ bool has_dot = false;
+ bool has_e = false;
+ bool found_digit = false;
+ PVAL valp = NULL;
+
+ for (; i < len; i++) {
+ switch (s[i]) {
+ case '.':
+ if (!found_digit || has_dot || has_e)
+ goto err;
+
+ has_dot = true;
+ break;
+ case 'e':
+ case 'E':
+ if (!found_digit || has_e)
+ goto err;
+
+ has_e = true;
+ found_digit = false;
+ break;
+ case '+':
+ if (!has_e)
+ goto err;
+
+ // passthru
+ case '-':
+ if (found_digit)
+ goto err;
+
+ break;
+ default:
+ if (isdigit(s[i])) {
+ if (has_dot && !has_e)
+ nd++; // Number of decimals
+
+ found_digit = true;
+ } else
+ goto fin;
+
+ }; // endswitch s[i]
+
+ buf[n++] = s[i];
+ } // endfor i
+
+ fin:
+ if (found_digit) {
+ buf[n] = 0;
+
+ if (has_dot || has_e) {
+ double dv = strtod(buf, NULL);
+
+ valp = AllocateValue(g, &dv, TYPE_DOUBLE, nd);
+ } else {
+ int iv = strtol(buf, NULL, 10);
+
+ valp = AllocateValue(g, &iv, TYPE_INT);
+ } // endif has
+
+ i--; // Unstack following character
+ return valp;
+ } else {
+ strcpy(g->Message, "No digit found");
+ return NULL;
+ } // endif found_digit
+
+ err:
+ strcpy(g->Message, "Unexpected EOF in number");
+ return NULL;
+} // end of ParseNumeric
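
Editorial aside: the numeric grammar ParseNumeric() accepts above allows an optional leading sign, at most one decimal point, at most one exponent part, and requires at least one digit before the value ends. The following is an independent, simplified checker restating those acceptance rules; it is illustrative only and not part of the patch:

#include <cctype>

// Standalone re-statement of the rules ParseNumeric() enforces:
// digits with an optional leading '-', at most one '.', at most one
// exponent part, and at least one trailing digit overall.
static bool looks_like_json_number(const char *s)
{
  bool has_dot= false, has_e= false, found_digit= false;

  for (int i= 0; s[i]; i++)
  {
    char c= s[i];
    if (isdigit((unsigned char) c))
      found_digit= true;
    else if (c == '.' && found_digit && !has_dot && !has_e)
      has_dot= true;
    else if ((c == 'e' || c == 'E') && found_digit && !has_e)
    {
      has_e= true;
      found_digit= false;          // require digits after the exponent
    }
    else if ((c == '+' || c == '-') && i > 0 &&
             (s[i - 1] == 'e' || s[i - 1] == 'E'))
      ;                            // sign directly after the exponent
    else if (c == '-' && i == 0)
      ;                            // leading minus
    else
      return false;
  }
  return found_digit;
}
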
+
+/***********************************************************************/
+/* Serialize a JSON tree: */
+/***********************************************************************/
+PSZ Serialize(PGLOBAL g, PJSON jsp, FILE *fs, int pretty)
+{
+ bool b = false, err = true;
+ JOUT *jp;
+
+ g->Message[0] = 0;
+
+ if (!jsp) {
+ strcpy(g->Message, "Null json tree");
+ return NULL;
+ } else if (!fs) {
+ // Serialize to a string
+ jp = new(g) JOUTSTR(g);
+ b = pretty == 1;
+ } else if (pretty == 2) {
+ // Serialize to a pretty file
+ jp = new(g) JOUTPRT(g, fs);
+ } else {
+ // Serialize to a flat file
+ jp = new(g) JOUTFILE(g, fs);
+ b = pretty == 1;
+ } // endif's
+
+ switch (jsp->GetType()) {
+ case TYPE_JAR:
+ err = SerializeArray(jp, (PJAR)jsp, b);
+ break;
+ case TYPE_JOB:
+ err = (b && jp->WriteChr('\t'));
+ err |= SerializeObject(jp, (PJOB)jsp);
+ break;
+ default:
+ strcpy(g->Message, "json tree is not an Array or an Object");
+ } // endswitch Type
+
+ if (fs) {
+ fputc('\n', fs);
+ fclose(fs);
+ return (err) ? g->Message : NULL;
+ } else if (!err) {
+ PSZ str = ((JOUTSTR*)jp)->Strp;
+
+ jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
+ return str;
+ } else {
+ if (!g->Message[0])
+ strcpy(g->Message, "Error in Serialize");
+
+ return NULL;
+ } // endif's
+
+} // end of Serialize
+
+/***********************************************************************/
+/* Serialize a JSON Array. */
+/***********************************************************************/
+bool SerializeArray(JOUT *js, PJAR jarp, bool b)
+{
+ bool first = true;
+
+
+ if (js->WriteChr('['))
+ return true;
+ else if (b && (js->WriteStr(EL) || js->WriteChr('\t')))
+ return true;
+
+ for (int i = 0; i < jarp->size(); i++) {
+ if (first)
+ first = false;
+ else if (js->WriteChr(','))
+ return true;
+ else if (b && (js->WriteStr(EL) || js->WriteChr('\t')))
+ return true;
+
+ if (SerializeValue(js, jarp->GetValue(i)))
+ return true;
+
+ } // endfor i
+
+ if (b && js->WriteStr(EL))
+ return true;
+
+ return js->WriteChr(']');
+} // end of SerializeArray
+
+/***********************************************************************/
+/* Serialize a JSON Object. */
+/***********************************************************************/
+bool SerializeObject(JOUT *js, PJOB jobp)
+{
+ bool first = true;
+
+ if (js->WriteChr('{'))
+ return true;
+
+ for (PJPR pair = jobp->First; pair; pair = pair->Next) {
+ if (first)
+ first = false;
+ else if (js->WriteChr(','))
+ return true;
+
+ if (js->WriteChr('\"') ||
+ js->WriteStr(pair->Key) ||
+ js->WriteChr('\"') ||
+ js->WriteChr(':') ||
+ SerializeValue(js, pair->Val))
+ return true;
+
+ } // endfor pair
+
+ return js->WriteChr('}');
+} // end of SerializeObject
+
+/***********************************************************************/
+/* Serialize a JSON Value. */
+/***********************************************************************/
+bool SerializeValue(JOUT *js, PJVAL jvp)
+{
+ PJAR jap;
+ PJOB jop;
+ PVAL valp;
+
+ if ((jap = jvp->GetArray()))
+ return SerializeArray(js, jap, false);
+ else if ((jop = jvp->GetObject()))
+ return SerializeObject(js, jop);
+ else if (!(valp = jvp->Value) || valp->IsNull())
+ return js->WriteStr("null");
+ else switch (valp->GetType()) {
+ case TYPE_TINY:
+ return js->WriteStr(valp->GetTinyValue() ? "true" : "false");
+ case TYPE_STRING:
+ return js->Escape(valp->GetCharValue());
+ default:
+ if (valp->IsTypeNum()) {
+ char buf[32];
+
+ return js->WriteStr(valp->GetCharString(buf));
+ } // endif valp
+
+ } // endswitch Type
+
+ strcpy(js->g->Message, "Unrecognized value");
+ return true;
+} // end of SerializeValue
+
+/* -------------------------- Class JOUTSTR -------------------------- */
+
+/***********************************************************************/
+/* JOUTSTR constructor. */
+/***********************************************************************/
+JOUTSTR::JOUTSTR(PGLOBAL g) : JOUT(g)
+{
+ PPOOLHEADER pph = (PPOOLHEADER)g->Sarea;
+
+ N = 0;
+ Max = pph->FreeBlk;
+ Max = (Max > 512) ? Max - 512 : Max;
+ Strp = (char*)PlugSubAlloc(g, NULL, 0); // Size not known yet
+} // end of JOUTSTR constructor
+
+/***********************************************************************/
+/* Concatenate a string to the Serialize string. */
+/***********************************************************************/
+bool JOUTSTR::WriteStr(const char *s)
+{
+ if (s) {
+ size_t len = strlen(s);
+
+ if (N + len > Max)
+ return true;
+
+ memcpy(Strp + N, s, len);
+ N += len;
+ return false;
+ } else
+ return true;
+
+} // end of WriteStr
+
+/***********************************************************************/
+/* Concatenate a character to the Serialize string. */
+/***********************************************************************/
+bool JOUTSTR::WriteChr(const char c)
+{
+ if (N + 1 > Max)
+ return true;
+
+ Strp[N++] = c;
+ return false;
+} // end of WriteChr
+
+/***********************************************************************/
+/* Escape and Concatenate a string to the Serialize string. */
+/***********************************************************************/
+bool JOUTSTR::Escape(const char *s)
+{
+ WriteChr('"');
+
+ for (unsigned int i = 0; i < strlen(s); i++)
+ switch (s[i]) {
+ case '\t': WriteStr("\\t"); break;
+ case '\n': WriteStr("\\n"); break;
+ case '\r': WriteStr("\\r"); break;
+ case '\b': WriteStr("\\b"); break;
+ case '\f': WriteStr("\\f"); break;
+ case '"': WriteStr("\\\""); break;
+ default:
+ WriteChr(s[i]);
+ break;
+ } // endswitch s[i]
+
+ WriteChr('"');
+ return false;
+} // end of Escape
+
+/* ------------------------- Class JOUTFILE -------------------------- */
+
+/***********************************************************************/
+/* Write a string to the Serialize file. */
+/***********************************************************************/
+bool JOUTFILE::WriteStr(const char *s)
+{
+ // This is temporary
+ fputs(s, Stream);
+ return false;
+} // end of WriteStr
+
+/***********************************************************************/
+/* Write a character to the Serialize file. */
+/***********************************************************************/
+bool JOUTFILE::WriteChr(const char c)
+{
+ // This is temporary
+ fputc(c, Stream);
+ return false;
+} // end of WriteChr
+
+/***********************************************************************/
+/* Escape and Concatenate a string to the Serialize string. */
+/***********************************************************************/
+bool JOUTFILE::Escape(const char *s)
+{
+ // This is temporary
+ fputc('"', Stream);
+
+ for (unsigned int i = 0; i < strlen(s); i++)
+ switch (s[i]) {
+ case '\t': fputs("\\t", Stream); break;
+ case '\n': fputs("\\n", Stream); break;
+ case '\r': fputs("\\r", Stream); break;
+ case '\b': fputs("\\b", Stream); break;
+ case '\f': fputs("\\f", Stream); break;
+ case '"': fputs("\\\"", Stream); break;
+ default:
+ fputc(s[i], Stream);
+ break;
+ } // endswitch s[i]
+
+ fputc('"', Stream);
+ return false;
+} // end of Escape
+
+/* ------------------------- Class JOUTPRT --------------------------- */
+
+/***********************************************************************/
+/* Write a string to the Serialize pretty file. */
+/***********************************************************************/
+bool JOUTPRT::WriteStr(const char *s)
+{
+ // This is temporary
+ if (B) {
+ fputs(EL, Stream);
+ M--;
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ B = false;
+ } // endif B
+
+ fputs(s, Stream);
+ return false;
+} // end of WriteStr
+
+/***********************************************************************/
+/* Write a character to the Serialize pretty file. */
+/***********************************************************************/
+bool JOUTPRT::WriteChr(const char c)
+{
+ switch (c) {
+ case ':':
+ fputs(": ", Stream);
+ break;
+ case '{':
+ case '[':
+#if 0
+ if (M)
+ fputs(EL, Stream);
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+#endif // 0
+
+ fputc(c, Stream);
+ fputs(EL, Stream);
+ M++;
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ break;
+ case '}':
+ case ']':
+ M--;
+ fputs(EL, Stream);
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ fputc(c, Stream);
+ B = true;
+ break;
+ case ',':
+ fputc(c, Stream);
+ fputs(EL, Stream);
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ B = false;
+ break;
+ default:
+ fputc(c, Stream);
+ } // endswitch c
+
+ return false;
+} // end of WriteChr
+
+/* -------------------------- Class JOBJECT -------------------------- */
+
+/***********************************************************************/
+/* Add a new pair to an Object. */
+/***********************************************************************/
+PJPR JOBJECT::AddPair(PGLOBAL g, PSZ key)
+{
+ PJPR jpp = new(g) JPAIR(key);
+
+ if (Last)
+ Last->Next = jpp;
+ else
+ First = jpp;
+
+ Last = jpp;
+ Size++;
+ return jpp;
+} // end of AddPair
+
+/***********************************************************************/
+/* Get the value corresponding to the given key. */
+/***********************************************************************/
+PJVAL JOBJECT::GetValue(const char* key)
+{
+ for (PJPR jp = First; jp; jp = jp->Next)
+ if (!strcmp(jp->Key, key))
+ return jp->Val;
+
+ return NULL;
+} // end of GetValue;
+
+/***********************************************************************/
+/* Return the text corresponding to all keys (XML like). */
+/***********************************************************************/
+PSZ JOBJECT::GetText(PGLOBAL g)
+{
+ char *p, *text = (char*)PlugSubAlloc(g, NULL, 0);
+ bool b = true;
+
+ if (!First)
+ return NULL;
+ else for (PJPR jp = First; jp; jp = jp->Next) {
+ if (!(p = jp->Val->GetString()))
+ p = "???";
+
+ if (b) {
+ strcpy(text, p);
+ b = false;
+ } else
+ strcat(strcat(text, " "), p);
+
+ } // endfor jp
+
+ PlugSubAlloc(g, NULL, strlen(text) + 1);
+ return text;
+} // end of GetText
+
+/***********************************************************************/
+/* Set or add a value corresponding to the given key. */
+/***********************************************************************/
+void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PSZ key)
+{
+ PJPR jp;
+
+ for (jp = First; jp; jp = jp->Next)
+ if (!strcmp(jp->Key, key)) {
+ jp->Val = jvp;
+ break;
+ } // endif key
+
+ if (!jp) {
+ jp = AddPair(g, key);
+ jp->Val = jvp;
+ } // endif jp
+
+} // end of SetValue
+
+/* -------------------------- Class JARRAY --------------------------- */
+
+/***********************************************************************/
+/* Make the array of values from the values list. */
+/***********************************************************************/
+void JARRAY::InitArray(PGLOBAL g)
+{
+ int i;
+ PJVAL jvp;
+
+ for (Size = 0, jvp = First; jvp; jvp = jvp->Next)
+ if (!jvp->Del)
+ Size++;
+
+ if (!Size) {
+ return;
+ } else if (Size > Alloc) {
+ // No need to realloc after deleting values
+ Mvals = (PJVAL*)PlugSubAlloc(g, NULL, Size * sizeof(PJVAL));
+ Alloc = Size;
+ } // endif Size
+
+ for (i = 0, jvp = First; jvp; jvp = jvp->Next)
+ if (!jvp->Del)
+ Mvals[i++] = jvp;
+
+} // end of InitArray
+
+/***********************************************************************/
+/* Get the Nth value of an Array. */
+/***********************************************************************/
+PJVAL JARRAY::GetValue(int i)
+{
+ if (Mvals && i >= 0 && i < Size)
+ return Mvals[i];
+ else
+ return NULL;
+} // end of GetValue
+
+/***********************************************************************/
+/* Add a Value to the Arrays Value list. */
+/***********************************************************************/
+PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp)
+{
+ if (!jvp)
+ jvp = new(g) JVALUE;
+
+ if (Last)
+ Last->Next = jvp;
+ else
+ First = jvp;
+
+ Last = jvp;
+ return jvp;
+} // end of AddValue
+
+/***********************************************************************/
+/* Set the nth Value of the Arrays Value list. */
+/***********************************************************************/
+bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n)
+{
+ int i = 0;
+ PJVAL jp, *jpp = &First;
+
+ for (i = 0, jp = First; i < n; i++, jp = *(jpp = &jp->Next))
+ if (!jp)
+ *jpp = jp = new(g) JVALUE;
+
+ *jpp = jvp;
+ jvp->Next = (jp ? jp->Next : NULL);
+ return false;
+} // end of SetValue
+
+/***********************************************************************/
+/* Delete a Value from the Arrays Value list. */
+/***********************************************************************/
+bool JARRAY::DeleteValue(int n)
+{
+ PJVAL jvp = GetValue(n);
+
+ if (jvp) {
+ jvp->Del = true;
+ return false;
+ } else
+ return true;
+
+} // end of DeleteValue
+
+/* -------------------------- Class JVALUE --------------------------- */
+
+/***********************************************************************/
+/* Constructor for a Value with a given string or numeric value. */
+/***********************************************************************/
+JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON()
+{
+ Jsp = NULL;
+ Value = AllocateValue(g, valp);
+ Next = NULL;
+ Del = false;
+} // end of JVALUE constructor
+
+/***********************************************************************/
+/* Returns the type of the Value's value. */
+/***********************************************************************/
+JTYP JVALUE::GetValType(void)
+{
+ if (Jsp)
+ return Jsp->GetType();
+ else if (Value)
+ return (JTYP)Value->GetType();
+ else
+ return (JTYP)TYPE_VOID;
+
+} // end of GetValType
+
+/***********************************************************************/
+/* Return the Value's Object value. */
+/***********************************************************************/
+PJOB JVALUE::GetObject(void)
+{
+ if (Jsp && Jsp->GetType() == TYPE_JOB)
+ return (PJOB)Jsp;
+
+ return NULL;
+} // end of GetObject
+
+/***********************************************************************/
+/* Return the Value's Array value. */
+/***********************************************************************/
+PJAR JVALUE::GetArray(void)
+{
+ if (Jsp && Jsp->GetType() == TYPE_JAR)
+ return (PJAR)Jsp;
+
+ return NULL;
+} // end of GetArray
+
+/***********************************************************************/
+/* Return the Value's Integer value. */
+/***********************************************************************/
+int JVALUE::GetInteger(void)
+{
+ return (Value) ? Value->GetIntValue() : 0;
+} // end of GetInteger
+
+/***********************************************************************/
+/* Return the Value's Double value. */
+/***********************************************************************/
+double JVALUE::GetFloat(void)
+{
+ return (Value) ? Value->GetFloatValue() : 0.0;
+} // end of GetFloat
+
+/***********************************************************************/
+/* Return the Value's String value. */
+/***********************************************************************/
+PSZ JVALUE::GetString(void)
+{
+ char buf[32];
+ return (Value) ? Value->GetCharString(buf) : NULL;
+} // end of GetString
+
diff --git a/storage/connect/json.h b/storage/connect/json.h
new file mode 100644
index 00000000000..11e15c3acd4
--- /dev/null
+++ b/storage/connect/json.h
@@ -0,0 +1,246 @@
+/**************** json H Declares Source Code File (.H) ****************/
+/* Name: json.h Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2015 */
+/* */
+/* This file contains the JSON classes declarations. */
+/***********************************************************************/
+#include "value.h"
+
+#if defined(_DEBUG)
+#define X assert(false);
+#else
+#define X
+#endif
+
+enum JTYP {TYPE_STRG = 1,
+ TYPE_DBL = 2,
+ TYPE_BOOL = 4,
+ TYPE_INTG = 7,
+ TYPE_JSON = 12,
+ TYPE_JAR, TYPE_JOB,
+ TYPE_JVAL};
+
+class JOUT;
+class JSON;
+class JMAP;
+class JVALUE;
+class JOBJECT;
+class JARRAY;
+
+typedef class JPAIR *PJPR;
+typedef class JSON *PJSON;
+typedef class JVALUE *PJVAL;
+typedef class JOBJECT *PJOB;
+typedef class JARRAY *PJAR;
+
+typedef struct {
+ char *str;
+ int len;
+ } STRG, *PSG;
+
+PJSON ParseJson(PGLOBAL g, char *s, int n, int prty, bool *b = NULL);
+PJAR ParseArray(PGLOBAL g, int& i, STRG& src);
+PJOB ParseObject(PGLOBAL g, int& i, STRG& src);
+PJVAL ParseValue(PGLOBAL g, int& i, STRG& src);
+char *ParseString(PGLOBAL g, int& i, STRG& src);
+PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src);
+PSZ Serialize(PGLOBAL g, PJSON jsp, FILE *fs, int pretty);
+bool SerializeArray(JOUT *js, PJAR jarp, bool b);
+bool SerializeObject(JOUT *js, PJOB jobp);
+bool SerializeValue(JOUT *js, PJVAL jvp);
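+
+// Minimal usage sketch (illustrative, assuming g is an initialized PGLOBAL):
+//   char buf[] = "[{\"a\": 1}]";
+//   PJSON jsp = ParseJson(g, buf, (int)strlen(buf), 0);
+//   PSZ str = jsp ? Serialize(g, jsp, NULL, 0) : NULL; // NULL file -> string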
+
+/***********************************************************************/
+/* Class JOUT. Used by Serialize. */
+/***********************************************************************/
+class JOUT : public BLOCK {
+ public:
+ JOUT(PGLOBAL gp) : BLOCK() {g = gp;}
+
+ virtual bool WriteStr(const char *s) = 0;
+ virtual bool WriteChr(const char c) = 0;
+ virtual bool Escape(const char *s) = 0;
+
+ // Member
+ PGLOBAL g;
+}; // end of class JOUT
+
+/***********************************************************************/
+/* Class JOUTSTR. Used to Serialize to a string. */
+/***********************************************************************/
+class JOUTSTR : public JOUT {
+ public:
+ JOUTSTR(PGLOBAL g);
+
+ virtual bool WriteStr(const char *s);
+ virtual bool WriteChr(const char c);
+ virtual bool Escape(const char *s);
+
+ // Member
+ char *Strp; // The serialized string
+ size_t N; // Position of next char
+ size_t Max; // String max size
+}; // end of class JOUTSTR
+
+/***********************************************************************/
+/* Class JOUTFILE. Used to Serialize to a file. */
+/***********************************************************************/
+class JOUTFILE : public JOUT {
+ public:
+ JOUTFILE(PGLOBAL g, FILE *str) : JOUT(g) {Stream = str;}
+
+ virtual bool WriteStr(const char *s);
+ virtual bool WriteChr(const char c);
+ virtual bool Escape(const char *s);
+
+ // Member
+ FILE *Stream;
+}; // end of class JOUTFILE
+
+/***********************************************************************/
+/* Class JOUTPRT. Used to Serialize to a pretty file. */
+/***********************************************************************/
+class JOUTPRT : public JOUTFILE {
+ public:
+ JOUTPRT(PGLOBAL g, FILE *str) : JOUTFILE(g, str) {M = 0; B = false;}
+
+ virtual bool WriteStr(const char *s);
+ virtual bool WriteChr(const char c);
+
+ // Member
+ int M;
+ bool B;
+}; // end of class JOUTPRT
+
+/***********************************************************************/
+/* Class JPAIR. The pairs of a json Object. */
+/***********************************************************************/
+class JPAIR : public BLOCK {
+ friend class JOBJECT;
+ friend PJOB ParseObject(PGLOBAL, int&, STRG&);
+ friend bool SerializeObject(JOUT *, PJOB);
+ public:
+ JPAIR(PSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;}
+
+ protected:
+ PSZ Key; // This pair key name
+ PJVAL Val; // To the value of the pair
+ PJPR Next; // To the next pair
+}; // end of class JPAIR
+
+/***********************************************************************/
+/* Class JSON. The base class for all other json classes. */
+/***********************************************************************/
+class JSON : public BLOCK {
+ public:
+ JSON(void) {Size = 0;}
+
+ int size(void) {return Size;}
+ virtual void Clear(void) {Size = 0;}
+ virtual JTYP GetType(void) {return TYPE_JSON;}
+ virtual JTYP GetValType(void) {X return TYPE_JSON;}
+ virtual void InitArray(PGLOBAL g) {X}
+ virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL) {X return NULL;}
+ virtual PJPR AddPair(PGLOBAL g, PSZ key) {X return NULL;}
+ virtual PJVAL GetValue(const char *key) {X return NULL;}
+ virtual PJOB GetObject(void) {X return NULL;}
+ virtual PJAR GetArray(void) {X return NULL;}
+ virtual PJVAL GetValue(int i) {X return NULL;}
+ virtual PVAL GetValue(void) {X return NULL;}
+ virtual PJSON GetJson(void) {X return NULL;}
+ virtual int GetInteger(void) {X return 0;}
+ virtual double GetFloat() {X return 0.0;}
+ virtual PSZ GetString() {X return NULL;}
+ virtual PSZ GetText(PGLOBAL g) {X return NULL;}
+ virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) {X return true;}
+ virtual void SetValue(PGLOBAL g, PJVAL jvp, PSZ key) {X}
+ virtual void SetValue(PVAL valp) {X}
+ virtual void SetValue(PJSON jsp) {X}
+ virtual bool DeleteValue(int i) {X return true;}
+
+ protected:
+ int Size;
+}; // end of class JSON
+
+/***********************************************************************/
+/* Class JOBJECT: contains a list of value pairs. */
+/***********************************************************************/
+class JOBJECT : public JSON {
+ friend PJOB ParseObject(PGLOBAL, int&, STRG&);
+ friend bool SerializeObject(JOUT *, PJOB);
+ public:
+ JOBJECT(void) : JSON() {First = Last = NULL;}
+
+ virtual void Clear(void) {First = Last = NULL; Size = 0;}
+ virtual JTYP GetType(void) {return TYPE_JOB;}
+ virtual PJPR AddPair(PGLOBAL g, PSZ key);
+ virtual PJOB GetObject(void) {return this;}
+ virtual PJVAL GetValue(const char* key);
+ virtual PSZ GetText(PGLOBAL g);
+ virtual void SetValue(PGLOBAL g, PJVAL jvp, PSZ key);
+
+ protected:
+ PJPR First;
+ PJPR Last;
+}; // end of class JOBJECT
+
+/***********************************************************************/
+/* Class JARRAY. */
+/***********************************************************************/
+class JARRAY : public JSON {
+ friend PJAR ParseArray(PGLOBAL, int&, STRG&);
+ public:
+ JARRAY(void) : JSON() {Alloc = 0; First = Last = NULL; Mvals = NULL;}
+
+ virtual void Clear(void) {First = Last = NULL; Size = 0;}
+ virtual JTYP GetType(void) {return TYPE_JAR;}
+ virtual PJAR GetArray(void) {return this;}
+ virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL);
+ virtual void InitArray(PGLOBAL g);
+ virtual PJVAL GetValue(int i);
+ virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i);
+ virtual bool DeleteValue(int n);
+
+ protected:
+ // Members
+ int Alloc; // The Mvals allocated size
+ PJVAL First; // Used when constructing
+ PJVAL Last; // Last constructed value
+ PJVAL *Mvals; // Allocated when finished
+}; // end of class JARRAY
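+
+// Construction sketch (illustrative, assuming g is an initialized PGLOBAL
+// and jsp a previously parsed PJSON):
+//   PJAR jar = new(g) JARRAY;
+//   jar->AddValue(g, new(g) JVALUE(jsp)); // append to the value list
+//   jar->InitArray(g); // make the values addressable by index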
+
+/***********************************************************************/
+/* Class JVALUE. */
+/***********************************************************************/
+class JVALUE : public JSON {
+ friend class JARRAY;
+ friend PJVAL ParseValue(PGLOBAL, int&, STRG&);
+ friend bool SerializeValue(JOUT *, PJVAL);
+ public:
+ JVALUE(void) : JSON()
+ {Jsp = NULL; Value = NULL; Next = NULL; Del = false;}
+ JVALUE(PJSON jsp) : JSON()
+ {Jsp = jsp; Value = NULL; Next = NULL; Del = false;}
+ JVALUE(PGLOBAL g, PVAL valp);
+
+ virtual void Clear(void)
+ {Jsp = NULL; Value = NULL; Next = NULL; Del = false; Size = 0;}
+ virtual JTYP GetType(void) {return TYPE_JVAL;}
+ virtual JTYP GetValType(void);
+ virtual PJOB GetObject(void);
+ virtual PJAR GetArray(void);
+ virtual PVAL GetValue(void) {return Value;}
+ virtual PJSON GetJson(void) {return (Jsp ? Jsp : this);}
+ virtual int GetInteger(void);
+ virtual double GetFloat(void);
+ virtual PSZ GetString(void);
+ virtual void SetValue(PVAL valp) {Value = valp;}
+ virtual void SetValue(PJSON jsp) {Jsp = jsp;}
+
+ protected:
+ PJSON Jsp; // To the json value
+ PVAL Value; // The numeric value
+ PJVAL Next; // Next value in array
+ bool Del; // True when deleted
+}; // end of class JVALUE
+
diff --git a/storage/connect/maputil.h b/storage/connect/maputil.h
index b5e54affcea..e310488eb5d 100644
--- a/storage/connect/maputil.h
+++ b/storage/connect/maputil.h
@@ -11,8 +11,8 @@ typedef struct {
DWORD lenH;
} MEMMAP;
-HANDLE CreateFileMap(PGLOBAL, LPCSTR, MEMMAP *, MODE, bool);
-bool CloseMemMap(void *memory, size_t dwSize);
+DllExport HANDLE CreateFileMap(PGLOBAL, LPCSTR, MEMMAP *, MODE, bool);
+DllExport bool CloseMemMap(void *memory, size_t dwSize);
#ifdef __cplusplus
}
diff --git a/storage/connect/msgid.h b/storage/connect/msgid.h
index 4496994afa3..0e9c036dc49 100644
--- a/storage/connect/msgid.h
+++ b/storage/connect/msgid.h
@@ -318,3 +318,8 @@
#define MSG_XPATH_CNTX_ERR 517
#define MSG_XPATH_EVAL_ERR 518
#define MSG_XPATH_NOT_SUPP 519
+#define MSG_ZERO_DIVIDE 520
+#define MSG_FIX_OVFLW_ADD 521
+#define MSG_FIX_OVFLW_TIMES 522
+#define MSG_FIX_UNFLW_ADD 523
+#define MSG_FIX_UNFLW_TIMES 524
diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc
index fc6c29092a1..0ae0537ba6f 100644
--- a/storage/connect/mycat.cc
+++ b/storage/connect/mycat.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) Olivier Bertrand 2004 - 2013
+/* Copyright (C) Olivier Bertrand 2004 - 2014
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -18,7 +18,7 @@
/* ------------- */
/* Version 1.4 */
/* */
-/* Author: Olivier Bertrand 2012 - 2013 */
+/* Author: Olivier Bertrand 2012 - 2014 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -89,6 +89,7 @@
#include "tabpivot.h"
#endif // PIVOT_SUPPORT
#include "tabvir.h"
+#include "tabjson.h"
#include "ha_connect.h"
#include "mycat.h"
@@ -139,6 +140,7 @@ TABTYPE GetTypeID(const char *type)
: (!stricmp(type, "PIVOT")) ? TAB_PIVOT
#endif
: (!stricmp(type, "VIR")) ? TAB_VIR
+ : (!stricmp(type, "JSON")) ? TAB_JSON
: (!stricmp(type, "OEM")) ? TAB_OEM : TAB_NIY;
} // end of GetTypeID
@@ -159,6 +161,7 @@ bool IsFileType(TABTYPE type)
case TAB_XML:
case TAB_INI:
case TAB_VEC:
+ case TAB_JSON:
isfile= true;
break;
default:
@@ -181,6 +184,7 @@ bool IsExactType(TABTYPE type)
case TAB_BIN:
case TAB_DBF:
// case TAB_XML: depends on Multiple || Xpand || Coltype
+// case TAB_JSON: depends on Multiple || Xpand || Coltype
case TAB_VEC:
case TAB_VIR:
exact= true;
@@ -214,7 +218,7 @@ bool IsTypeNullable(TABTYPE type)
} // end of IsTypeNullable
/***********************************************************************/
-/* Return true for indexable table by XINDEX. */
+/* Return true for fixed record length tables. */
/***********************************************************************/
bool IsTypeFixed(TABTYPE type)
{
@@ -250,6 +254,7 @@ bool IsTypeIndexable(TABTYPE type)
case TAB_BIN:
case TAB_VEC:
case TAB_DBF:
+ case TAB_JSON:
idx= true;
break;
default:
@@ -275,6 +280,7 @@ int GetIndexType(TABTYPE type)
case TAB_BIN:
case TAB_VEC:
case TAB_DBF:
+ case TAB_JSON:
xtyp= 1;
break;
case TAB_MYSQL:
@@ -538,6 +544,7 @@ PRELDEF MYCAT::MakeTableDesc(PGLOBAL g, LPCSTR name, LPCSTR am)
case TAB_PIVOT: tdp= new(g) PIVOTDEF; break;
#endif // PIVOT_SUPPORT
case TAB_VIR: tdp= new(g) VIRDEF; break;
+ case TAB_JSON: tdp= new(g) JSONDEF; break;
default:
sprintf(g->Message, MSG(BAD_TABLE_TYPE), am, name);
} // endswitch
diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp
index 92c2faea676..2f3d75b52fa 100644
--- a/storage/connect/myconn.cpp
+++ b/storage/connect/myconn.cpp
@@ -430,10 +430,11 @@ int MYSQLC::GetResultSize(PGLOBAL g, PSZ sql)
/***********************************************************************/
int MYSQLC::Open(PGLOBAL g, const char *host, const char *db,
const char *user, const char *pwd,
- int pt)
+ int pt, const char *csname)
{
const char *pipe = NULL;
- uint cto = 6000, nrt = 12000;
+ uint cto = 6000, nrt = 12000;
+ my_bool my_true= 1;
m_DB = mysql_init(NULL);
@@ -470,6 +471,18 @@ int MYSQLC::Open(PGLOBAL g, const char *host, const char *db,
} // endif pwd
#endif // 0
+/***********************************************************************/
+/* BUG# 17044 Federated Storage Engine is not UTF8 clean */
+/* Add set names to whatever charset the table is at open of table */
+/* this sets the csname like 'set names utf8'. */
+/***********************************************************************/
+ if (csname)
+ mysql_options(m_DB, MYSQL_SET_CHARSET_NAME, csname);
+
+ // Don't know what this one does but FEDERATED does it
+ mysql_options(m_DB, MYSQL_OPT_USE_THREAD_SPECIFIC_MEMORY,
+ (char*)&my_true);
+
if (!mysql_real_connect(m_DB, host, user, pwd, db, pt, pipe, CLIENT_MULTI_RESULTS)) {
#if defined(_DEBUG)
sprintf(g->Message, "mysql_real_connect failed: (%d) %s",
diff --git a/storage/connect/myconn.h b/storage/connect/myconn.h
index 65e6531aee4..79b8a43fe5a 100644
--- a/storage/connect/myconn.h
+++ b/storage/connect/myconn.h
@@ -67,7 +67,7 @@ class DllItem MYSQLC {
int GetTableSize(PGLOBAL g, PSZ query);
int Open(PGLOBAL g, const char *host, const char *db,
const char *user= "root", const char *pwd= "*",
- int pt= 0);
+ int pt= 0, const char *csname = NULL);
int KillQuery(ulong id);
int ExecSQL(PGLOBAL g, const char *query, int *w = NULL);
int ExecSQLcmd(PGLOBAL g, const char *query, int *w);
@@ -98,5 +98,6 @@ class DllItem MYSQLC {
int m_Fields; // The number of result fields
int m_Afrw; // The number of affected rows
bool m_Use; // Use or store result set
+ const char *csname; // Table charset name
}; // end of class MYSQLC
diff --git a/storage/connect/mysql-test/connect/r/json.result b/storage/connect/mysql-test/connect/r/json.result
new file mode 100644
index 00000000000..094bb669d18
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/json.result
@@ -0,0 +1,439 @@
+#
+# Testing doc samples
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+LANG CHAR(2),
+SUBJECT CHAR(32),
+AUTHOR CHAR(64),
+TITLE CHAR(32),
+TRANSLATION CHAR(32),
+TRANSLATOR CHAR(80),
+PUBLISHER CHAR(32),
+DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+#
+# Testing Jpath. Get the number of authors
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+Language CHAR(2) FIELD_FORMAT='LANG',
+Subject CHAR(32) FIELD_FORMAT='SUBJECT',
+Authors INT(2) FIELD_FORMAT='AUTHOR:[#]',
+Title CHAR(32) FIELD_FORMAT='TITLE',
+Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
+Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
+Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
+Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
+Year int(4) FIELD_FORMAT='DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+ISBN Language Subject Authors Title Translation Translator Publisher Location Year
+9782212090819 fr applications 2 Construire une application XML Eyrolles Paris 1999
+9782840825685 fr applications 1 XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+#
+# Concatenates the authors
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+Language CHAR(2) FIELD_FORMAT='LANG',
+Subject CHAR(32) FIELD_FORMAT='SUBJECT',
+AuthorFN CHAR(128) FIELD_FORMAT='AUTHOR:[" and "]:FIRSTNAME',
+AuthorLN CHAR(128) FIELD_FORMAT='AUTHOR:[" and "]:LASTNAME',
+Title CHAR(32) FIELD_FORMAT='TITLE',
+Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
+Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
+Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
+Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
+Year int(4) FIELD_FORMAT='DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe and François Bernadac and Knab Construire une application XML Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+#
+# Testing expanding authors
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+Language CHAR(2) FIELD_FORMAT='LANG',
+Subject CHAR(32) FIELD_FORMAT='SUBJECT',
+AuthorFN CHAR(128) FIELD_FORMAT='AUTHOR:[X]:FIRSTNAME',
+AuthorLN CHAR(128) FIELD_FORMAT='AUTHOR:[X]:LASTNAME',
+Title CHAR(32) FIELD_FORMAT='TITLE',
+Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
+Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
+Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
+Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
+Year int(4) FIELD_FORMAT='DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML Eyrolles Paris 1999
+9782212090819 fr applications François Knab Construire une application XML Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab';
+SELECT * FROM t1 WHERE ISBN = '9782212090819';
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML Eyrolles Paris 1999
+9782212090819 fr applications Philippe Knab Construire une application XML Eyrolles Paris 1999
+#
+# To add an author a new table must be created
+#
+CREATE TABLE t2 (
+FIRSTNAME CHAR(32),
+LASTNAME CHAR(32))
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn' OPTION_LIST='Object=[2]:AUTHOR';
+SELECT * FROM t2;
+FIRSTNAME LASTNAME
+William J. Pardi
+INSERT INTO t2 VALUES('Charles','Dickens');
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML Eyrolles Paris 1999
+9782212090819 fr applications Philippe Knab Construire une application XML Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+9782840825685 fr applications Charles Dickens XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+DROP TABLE t2;
+#
+# Check the biblio file has the good format
+#
+CREATE TABLE t1
+(
+line char(255)
+)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+line
+[
+ {
+ "ISBN": "9782212090819",
+ "LANG": "fr",
+ "SUBJECT": "applications",
+ "AUTHOR": [
+ {
+ "FIRSTNAME": "Jean-Christophe",
+ "LASTNAME": "Bernadac"
+ },
+ {
+ "FIRSTNAME": "Philippe",
+ "LASTNAME": "Knab"
+ }
+ ],
+ "TITLE": "Construire une application XML",
+ "PUBLISHER": {
+ "NAME": "Eyrolles",
+ "PLACE": "Paris"
+ },
+ "DATEPUB": 1999
+ },
+ {
+ "ISBN": "9782840825685",
+ "LANG": "fr",
+ "SUBJECT": "applications",
+ "AUTHOR": [
+ {
+ "FIRSTNAME": "William J.",
+ "LASTNAME": "Pardi"
+ },
+ {
+ "FIRSTNAME": "Charles",
+ "LASTNAME": "Dickens"
+ }
+ ],
+ "TITLE": "XML en Action",
+ "TRANSLATION": "adapté de l'anglais par",
+ "TRANSLATOR": {
+ "FIRSTNAME": "James",
+ "LASTNAME": "Guerin"
+ },
+ "PUBLISHER": {
+ "NAME": "Microsoft Press",
+ "PLACE": "Paris"
+ },
+ "DATEPUB": 1999
+ }
+]
+DROP TABLE t1;
+#
+# A file with 2 arrays
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[X]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK::EXPENSE:["+"]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK::EXPENSE:[+]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer+Food+Food+Car 69.00
+Joe 4 Beer+Beer+Food+Food+Beer 83.00
+Joe 5 Beer+Food 26.00
+Beth 3 Beer 16.00
+Beth 4 Food+Beer 32.00
+Beth 5 Food+Beer 32.00
+Janet 3 Car+Food+Beer 55.00
+Janet 4 Car 17.00
+Janet 5 Beer+Car+Beer+Food 57.00
+DROP TABLE t1;
+#
+# Cannot be fully expanded
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[X]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK:[X]:EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[X]:EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+SELECT * FROM t1;
+ERROR HY000: Got error 174 'Cannot expand more than one array' from CONNECT
+DROP TABLE t1;
+#
+# Expand expense in 3 one week tables
+#
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+SELECT * FROM t2;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+SELECT * FROM t3;
+WHO WEEK WHAT AMOUNT
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[3]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+SELECT * FROM t4;
+WHO WEEK WHAT AMOUNT
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+#
+# The expanded table is made as a TBL table
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32),
+AMOUNT DOUBLE(8,2))
+ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4';
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+DROP TABLE t1, t2, t3, t4;
+#
+# Three partial JSON tables
+#
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.jsn';
+SELECT * FROM t2;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.jsn';
+SELECT * FROM t3;
+WHO WEEK WHAT AMOUNT
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.jsn';
+SELECT * FROM t4;
+WHO WEEK WHAT AMOUNT
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+#
+# The complete table can be a multiple JSON table
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.jsn' MULTIPLE=1;
+SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
+WHO WEEK WHAT AMOUNT
+Beth 3 Beer 16.00
+Beth 4 Beer 15.00
+Beth 4 Food 17.00
+Beth 5 Beer 20.00
+Beth 5 Food 12.00
+Janet 3 Beer 18.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 4 Car 17.00
+Janet 5 Beer 14.00
+Janet 5 Beer 19.00
+Janet 5 Car 12.00
+Janet 5 Food 12.00
+Joe 3 Beer 18.00
+Joe 3 Car 20.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 4 Beer 14.00
+Joe 4 Beer 16.00
+Joe 4 Beer 19.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+DROP TABLE t1;
+#
+# Or also a partition JSON table
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.jsn';
+ALTER TABLE t1
+PARTITION BY LIST COLUMNS(WEEK) (
+PARTITION `3` VALUES IN(3),
+PARTITION `4` VALUES IN(4),
+PARTITION `5` VALUES IN(5));
+Warnings:
+Warning 1105 Data repartition in 3 is unchecked
+Warning 1105 Data repartition in 4 is unchecked
+Warning 1105 Data repartition in 5 is unchecked
+SHOW WARNINGS;
+Level Code Message
+Warning 1105 Data repartition in 3 is unchecked
+Warning 1105 Data repartition in 4 is unchecked
+Warning 1105 Data repartition in 5 is unchecked
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+SELECT * FROM t1 WHERE WEEK = 4;
+WHO WEEK WHAT AMOUNT
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+DROP TABLE t1, t2, t3, t4;
diff --git a/storage/connect/mysql-test/connect/std_data/biblio.jsn b/storage/connect/mysql-test/connect/std_data/biblio.jsn
new file mode 100644
index 00000000000..bab8fd24305
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/biblio.jsn
@@ -0,0 +1,45 @@
+[
+ {
+ "ISBN": "9782212090819",
+ "LANG": "fr",
+ "SUBJECT": "applications",
+ "AUTHOR": [
+ {
+ "FIRSTNAME": "Jean-Christophe",
+ "LASTNAME": "Bernadac"
+ },
+ {
+ "FIRSTNAME": "François",
+ "LASTNAME": "Knab"
+ }
+ ],
+ "TITLE": "Construire une application XML",
+ "PUBLISHER": {
+ "NAME": "Eyrolles",
+ "PLACE": "Paris"
+ },
+ "DATEPUB": 1999
+ },
+ {
+ "ISBN": "9782840825685",
+ "LANG": "fr",
+ "SUBJECT": "applications",
+ "AUTHOR": [
+ {
+ "FIRSTNAME": "William J.",
+ "LASTNAME": "Pardi"
+ }
+ ],
+ "TITLE": "XML en Action",
+ "TRANSLATION": "adapté de l'anglais par",
+ "TRANSLATOR": {
+ "FIRSTNAME": "James",
+ "LASTNAME": "Guerin"
+ },
+ "PUBLISHER": {
+ "NAME": "Microsoft Press",
+ "PLACE": "Paris"
+ },
+ "DATEPUB": 1999
+ }
+]
diff --git a/storage/connect/mysql-test/connect/std_data/expense.jsn b/storage/connect/mysql-test/connect/std_data/expense.jsn
new file mode 100644
index 00000000000..e65ad5261f1
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/expense.jsn
@@ -0,0 +1,158 @@
+[
+ {
+ "WHO": "Joe",
+ "WEEK": [
+ {
+ "NUMBER": 3,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 18.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 12.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 19.00
+ },
+ {
+ "WHAT": "Car",
+ "AMOUNT": 20.00
+ }
+ ]
+ },
+ {
+ "NUMBER": 4,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 19.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 16.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 17.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 17.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 14.00
+ }
+ ]
+ },
+ {
+ "NUMBER": 5,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 14.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 12.00
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "WHO": "Beth",
+ "WEEK": [
+ {
+ "NUMBER": 3,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 16.00
+ }
+ ]
+ },
+ {
+ "NUMBER": 4,
+ "EXPENSE": [
+ {
+ "WHAT": "Food",
+ "AMOUNT": 17.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 15.00
+ }
+ ]
+ },
+ {
+ "NUMBER": 5,
+ "EXPENSE": [
+ {
+ "WHAT": "Food",
+ "AMOUNT": 12.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 20.00
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "WHO": "Janet",
+ "WEEK": [
+ {
+ "NUMBER": 3,
+ "EXPENSE": [
+ {
+ "WHAT": "Car",
+ "AMOUNT": 19.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 18.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 18.00
+ }
+ ]
+ },
+ {
+ "NUMBER": 4,
+ "EXPENSE": [
+ {
+ "WHAT": "Car",
+ "AMOUNT": 17.00
+ }
+ ]
+ },
+ {
+ "NUMBER": 5,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 14.00
+ },
+ {
+ "WHAT": "Car",
+ "AMOUNT": 12.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 19.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 12.00
+ }
+ ]
+ }
+ ]
+ }
+]
diff --git a/storage/connect/mysql-test/connect/std_data/mulexp3.jsn b/storage/connect/mysql-test/connect/std_data/mulexp3.jsn
new file mode 100644
index 00000000000..c228448b073
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/mulexp3.jsn
@@ -0,0 +1,52 @@
+[
+ {
+ "WHO": "Joe",
+ "WEEK": 3,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 18.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 12.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 19.00
+ },
+ {
+ "WHAT": "Car",
+ "AMOUNT": 20.00
+ }
+ ]
+ },
+ {
+ "WHO": "Beth",
+ "WEEK": 3,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 16.00
+ }
+ ]
+ },
+ {
+ "WHO": "Janet",
+ "WEEK": 3,
+ "EXPENSE": [
+ {
+ "WHAT": "Car",
+ "AMOUNT": 19.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 18.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 18.00
+ }
+ ]
+ }
+]
diff --git a/storage/connect/mysql-test/connect/std_data/mulexp4.jsn b/storage/connect/mysql-test/connect/std_data/mulexp4.jsn
new file mode 100644
index 00000000000..0e43ffec07b
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/mulexp4.jsn
@@ -0,0 +1,52 @@
+[
+ {
+ "WHO": "Joe",
+ "WEEK": 4,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 19.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 16.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 17.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 17.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 14.00
+ }
+ ]
+ },
+ {
+ "WHO": "Beth",
+ "WEEK": 4,
+ "EXPENSE": [
+ {
+ "WHAT": "Food",
+ "AMOUNT": 17.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 15.00
+ }
+ ]
+ },
+ {
+ "WHO": "Janet",
+ "WEEK": 4,
+ "EXPENSE": [
+ {
+ "WHAT": "Car",
+ "AMOUNT": 17.00
+ }
+ ]
+ }
+]
diff --git a/storage/connect/mysql-test/connect/std_data/mulexp5.jsn b/storage/connect/mysql-test/connect/std_data/mulexp5.jsn
new file mode 100644
index 00000000000..7a707506c2f
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/mulexp5.jsn
@@ -0,0 +1,52 @@
+[
+ {
+ "WHO": "Joe",
+ "WEEK": 5,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 14.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 12.00
+ }
+ ]
+ },
+ {
+ "WHO": "Beth",
+ "WEEK": 5,
+ "EXPENSE": [
+ {
+ "WHAT": "Food",
+ "AMOUNT": 12.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 20.00
+ }
+ ]
+ },
+ {
+ "WHO": "Janet",
+ "WEEK": 5,
+ "EXPENSE": [
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 14.00
+ },
+ {
+ "WHAT": "Car",
+ "AMOUNT": 12.00
+ },
+ {
+ "WHAT": "Beer",
+ "AMOUNT": 19.00
+ },
+ {
+ "WHAT": "Food",
+ "AMOUNT": 12.00
+ }
+ ]
+ }
+]
diff --git a/storage/connect/mysql-test/connect/t/json.test b/storage/connect/mysql-test/connect/t/json.test
new file mode 100644
index 00000000000..a7e630ed0cf
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/json.test
@@ -0,0 +1,247 @@
+--source include/not_embedded.inc
+--source include/have_partition.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--copy_file $MTR_SUITE_DIR/std_data/biblio.jsn $MYSQLD_DATADIR/test/biblio.jsn
+--copy_file $MTR_SUITE_DIR/std_data/expense.jsn $MYSQLD_DATADIR/test/expense.jsn
+--copy_file $MTR_SUITE_DIR/std_data/mulexp3.jsn $MYSQLD_DATADIR/test/mulexp3.jsn
+--copy_file $MTR_SUITE_DIR/std_data/mulexp4.jsn $MYSQLD_DATADIR/test/mulexp4.jsn
+--copy_file $MTR_SUITE_DIR/std_data/mulexp5.jsn $MYSQLD_DATADIR/test/mulexp5.jsn
+
+--echo #
+--echo # Testing doc samples
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ LANG CHAR(2),
+ SUBJECT CHAR(32),
+ AUTHOR CHAR(64),
+ TITLE CHAR(32),
+ TRANSLATION CHAR(32),
+ TRANSLATOR CHAR(80),
+ PUBLISHER CHAR(32),
+ DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Testing Jpath. Get the number of authors
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ Language CHAR(2) FIELD_FORMAT='LANG',
+ Subject CHAR(32) FIELD_FORMAT='SUBJECT',
+ Authors INT(2) FIELD_FORMAT='AUTHOR:[#]',
+ Title CHAR(32) FIELD_FORMAT='TITLE',
+ Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
+ Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
+ Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
+ Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
+ Year int(4) FIELD_FORMAT='DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Concatenates the authors
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ Language CHAR(2) FIELD_FORMAT='LANG',
+ Subject CHAR(32) FIELD_FORMAT='SUBJECT',
+ AuthorFN CHAR(128) FIELD_FORMAT='AUTHOR:[" and "]:FIRSTNAME',
+ AuthorLN CHAR(128) FIELD_FORMAT='AUTHOR:[" and "]:LASTNAME',
+ Title CHAR(32) FIELD_FORMAT='TITLE',
+ Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
+ Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
+ Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
+ Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
+ Year int(4) FIELD_FORMAT='DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing expanding authors
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ Language CHAR(2) FIELD_FORMAT='LANG',
+ Subject CHAR(32) FIELD_FORMAT='SUBJECT',
+ AuthorFN CHAR(128) FIELD_FORMAT='AUTHOR:[X]:FIRSTNAME',
+ AuthorLN CHAR(128) FIELD_FORMAT='AUTHOR:[X]:LASTNAME',
+ Title CHAR(32) FIELD_FORMAT='TITLE',
+ Translation CHAR(32) FIELD_FORMAT='TRANSLATION',
+ Translator CHAR(80) FIELD_FORMAT='TRANSLATOR',
+ Publisher CHAR(20) FIELD_FORMAT='PUBLISHER:NAME',
+ Location CHAR(16) FIELD_FORMAT='PUBLISHER:PLACE',
+ Year int(4) FIELD_FORMAT='DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab';
+SELECT * FROM t1 WHERE ISBN = '9782212090819';
+
+--echo #
+--echo # To add an author a new table must be created
+--echo #
+CREATE TABLE t2 (
+FIRSTNAME CHAR(32),
+LASTNAME CHAR(32))
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.jsn' OPTION_LIST='Object=[2]:AUTHOR';
+SELECT * FROM t2;
+INSERT INTO t2 VALUES('Charles','Dickens');
+SELECT * FROM t1;
+DROP TABLE t1;
+DROP TABLE t2;
+
+--echo #
+--echo # Check the biblio file has the good format
+--echo #
+CREATE TABLE t1
+(
+ line char(255)
+)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.jsn';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # A file with 2 arrays
+--echo #
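+# Jpath operators used in this test (cf. the expected results): [X] expands
+# an array into one row per item, ["..."] concatenates string values with
+# the given separator, [+] sums numeric values, and [#] returns the array
+# size.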
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[X]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK::EXPENSE:["+"]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK::EXPENSE:[+]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Cannot be fully expanded
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[X]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK:[X]:EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[X]:EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+--error ER_GET_ERRMSG
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Expand expense in 3 one week tables
+--echo #
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[1]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[1]:EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[2]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[2]:EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2) FIELD_FORMAT='WEEK:[3]:NUMBER',
+WHAT CHAR(32) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='WEEK:[3]:EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.jsn';
+SELECT * FROM t4;
+
+--echo #
+--echo # The expanded table is made as a TBL table
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32),
+AMOUNT DOUBLE(8,2))
+ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4';
+SELECT * FROM t1;
+DROP TABLE t1, t2, t3, t4;
+
+--echo #
+--echo # Three partial JSON tables
+--echo #
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.jsn';
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.jsn';
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.jsn';
+SELECT * FROM t4;
+
+--echo #
+--echo # The complete table can be a multiple JSON table
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.jsn' MULTIPLE=1;
+SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
+DROP TABLE t1;
+
+--echo #
+--echo # Or also a partition JSON table
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) FIELD_FORMAT='EXPENSE:[X]:WHAT',
+AMOUNT DOUBLE(8,2) FIELD_FORMAT='EXPENSE:[X]:AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.jsn';
+ALTER TABLE t1
+PARTITION BY LIST COLUMNS(WEEK) (
+PARTITION `3` VALUES IN(3),
+PARTITION `4` VALUES IN(4),
+PARTITION `5` VALUES IN(5));
+SHOW WARNINGS;
+SELECT * FROM t1;
+SELECT * FROM t1 WHERE WEEK = 4;
+DROP TABLE t1, t2, t3, t4;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/biblio.jsn
+--remove_file $MYSQLD_DATADIR/test/expense.jsn
+--remove_file $MYSQLD_DATADIR/test/mulexp3.jsn
+--remove_file $MYSQLD_DATADIR/test/mulexp4.jsn
+--remove_file $MYSQLD_DATADIR/test/mulexp5.jsn
diff --git a/storage/connect/odbccat.h b/storage/connect/odbccat.h
index 9cc14695977..8642d915211 100644
--- a/storage/connect/odbccat.h
+++ b/storage/connect/odbccat.h
@@ -1,3 +1,7 @@
+// Timeout and net wait defaults
+#define DEFAULT_LOGIN_TIMEOUT -1 // means do not set
+#define DEFAULT_QUERY_TIMEOUT -1 // means do not set
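+// These defaults feed the new cto/qto arguments of the catalog functions
+// below; a negative value leaves the driver's own timeout untouched.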
+
/***********************************************************************/
/* ODBC catalog function prototypes. */
/***********************************************************************/
@@ -6,8 +10,8 @@ char *ODBCCheckConnection(PGLOBAL g, char *dsn, int cop);
#endif // PROMPT_OK
PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info);
PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table,
- char *colpat, int maxres, bool info);
-PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src);
+ char *colpat, int maxres, int cto, int qto, bool info);
+PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src, int cto, int qto);
PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat,
- int maxres, bool info);
+ int maxres, int cto, int qto, bool info);
PQRYRES ODBCDrivers(PGLOBAL g, int maxres, bool info);
diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp
index bef735b4a6d..3e616ec8f04 100644
--- a/storage/connect/odbconn.cpp
+++ b/storage/connect/odbconn.cpp
@@ -1,7 +1,7 @@
/************ Odbconn C++ Functions Source Code File (.CPP) ************/
-/* Name: ODBCONN.CPP Version 2.0 */
+/* Name: ODBCONN.CPP Version 2.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2015 */
/* */
/* This file contains the ODBC connection classes functions. */
/***********************************************************************/
@@ -146,18 +146,25 @@ int TranslateSQLType(int stp, int prec, int& len, char& v)
type = TYPE_DOUBLE;
break;
case SQL_DATETIME: // 9
-// case SQL_DATE: // 9
+ type = TYPE_DATE;
+ len = 19;
+ break;
+ case SQL_TYPE_DATE: // 91
type = TYPE_DATE;
len = 10;
+ v = 'D';
break;
case SQL_INTERVAL: // 10
-// case SQL_TIME: // 10
+ case SQL_TYPE_TIME: // 92
type = TYPE_STRING;
len = 8 + ((prec) ? (prec+1) : 0);
+ v = 'T';
break;
case SQL_TIMESTAMP: // 11
+ case SQL_TYPE_TIMESTAMP: // 93
type = TYPE_DATE;
len = 19 + ((prec) ? (prec+1) : 0);
+ v = 'S';
break;
case SQL_BIGINT: // (-5)
type = TYPE_BIGINT;
@@ -284,7 +291,7 @@ static void ResetNullValues(CATPARM *cap)
/* of an ODBC table that will be retrieved by GetData commands. */
/***********************************************************************/
PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table,
- char *colpat, int maxres, bool info)
+ char *colpat, int maxres, int cto, int qto, bool info)
{
int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING,
TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT,
@@ -303,6 +310,8 @@ PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table,
/************************************************************************/
if (!info) {
ocp = new(g) ODBConn(g, NULL);
+ ocp->SetLoginTimeout((DWORD)cto);
+ ocp->SetQueryTimeout((DWORD)qto);
if (ocp->Open(dsn, 10) < 1) // openReadOnly + noODBCdialog
return NULL;
@@ -379,10 +388,12 @@ PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table,
/* ODBCSrcCols: constructs the result blocks containing the */
/* description of all the columns of a Srcdef option. */
/**************************************************************************/
-PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src)
+PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src, int cto, int qto)
{
ODBConn *ocp = new(g) ODBConn(g, NULL);
+ ocp->SetLoginTimeout((DWORD)cto);
+ ocp->SetQueryTimeout((DWORD)qto);
return ocp->GetMetaData(g, dsn, src);
} // end of ODBCSrcCols
@@ -563,7 +574,7 @@ PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info)
/* an ODBC database that will be retrieved by GetData commands. */
/**************************************************************************/
PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat,
- int maxres, bool info)
+ int maxres, int cto, int qto, bool info)
{
int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING,
TYPE_STRING, TYPE_STRING};
@@ -583,6 +594,8 @@ PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat,
/* Open the connection with the ODBC data source. */
/**********************************************************************/
ocp = new(g) ODBConn(g, NULL);
+ ocp->SetLoginTimeout((DWORD)cto);
+ ocp->SetQueryTimeout((DWORD)qto);
if (ocp->Open(dsn, 2) < 1) // 2 is openReadOnly
return NULL;
@@ -910,10 +923,13 @@ ODBConn::ODBConn(PGLOBAL g, TDBODBC *tdbp)
m_UpdateOptions = 0;
m_RowsetSize = (DWORD)((tdbp) ? tdbp->Rows : 10);
m_Catver = (tdbp) ? tdbp->Catver : 0;
+ m_Rows = 0;
m_Connect = NULL;
m_Updatable = true;
m_Transact = false;
m_Scrollable = (tdbp) ? tdbp->Scrollable : false;
+ m_First = true;
+ m_Full = false;
m_IDQuoteChar[0] = '"';
m_IDQuoteChar[1] = 0;
//*m_ErrMsg = '\0';
@@ -1068,6 +1084,9 @@ int ODBConn::Open(PSZ ConnectString, DWORD options)
} // endif
/*ver = GetStringInfo(SQL_DRIVER_ODBC_VER);*/
+ // Verify support for required functionality and cache info
+// VerifyConnect(); Deprecated
+ GetConnectInfo();
} catch(DBX *xp) {
// strcpy(g->Message, xp->m_ErrMsg[0]);
strcpy(g->Message, xp->GetErrorMessage(0));
@@ -1076,9 +1095,6 @@ int ODBConn::Open(PSZ ConnectString, DWORD options)
return -1;
} // end try-catch
- // Verify support for required functionality and cache info
- VerifyConnect();
- GetConnectInfo();
return 1;
} // end of Open
@@ -1124,10 +1140,13 @@ void ODBConn::AllocConnect(DWORD Options)
} // endif
#endif // _DEBUG
- rc = SQLSetConnectOption(m_hdbc, SQL_LOGIN_TIMEOUT, m_LoginTimeout);
+ if ((signed)m_LoginTimeout >= 0) {
+ rc = SQLSetConnectOption(m_hdbc, SQL_LOGIN_TIMEOUT, m_LoginTimeout);
+
+ if (trace && rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO)
+ htrc("Warning: Failure setting login timeout\n");
- if (trace && rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO)
- htrc("Warning: Failure setting login timeout\n");
+ } // endif Timeout
if (!m_Updatable) {
rc = SQLSetConnectOption(m_hdbc, SQL_ACCESS_MODE, SQL_MODE_READ_ONLY);
@@ -1500,6 +1519,20 @@ int ODBConn::Fetch()
ThrowDBX(rc, "Fetch", m_hstmt);
irc = (rc == SQL_NO_DATA_FOUND) ? 0 : (int)crow;
+
+ if (m_First) {
+ // First fetch. Check whether the full table was read
+ if ((m_Full = irc < (signed)m_RowsetSize)) {
+ m_Tdb->Memory = 0; // Not needed anymore
+ m_Rows = irc; // Table size
+ } // endif m_Full
+
+ m_First = false;
+ } // endif m_First
+
+ if (m_Tdb->Memory == 1)
+ m_Rows += irc;
+
} catch(DBX *x) {
if (trace)
for (int i = 0; i < MAX_NUM_OF_MSG && x->m_ErrMsg[i]; i++)
@@ -2150,6 +2183,7 @@ int ODBConn::GetCatInfo(CATPARM *cap)
HSTMT hstmt = NULL;
SQLLEN *vl, *vlen = NULL;
PVAL *pval = NULL;
+ char* *pbuf = NULL;
try {
b = false;
@@ -2226,6 +2260,7 @@ int ODBConn::GetCatInfo(CATPARM *cap)
// Unconditional to handle STRBLK's
pval = (PVAL *)PlugSubAlloc(g, NULL, n * sizeof(PVAL));
vlen = (SQLLEN *)PlugSubAlloc(g, NULL, n * sizeof(SQLLEN));
+ pbuf = (char**)PlugSubAlloc(g, NULL, n * sizeof(char*));
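+ // pbuf holds intermediate buffers for string columns; their content
+ // is copied into pval via SetValue_char after each fetch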
// Now bind the column buffers
for (n = 0, crp = qrp->Colresp; crp; crp = crp->Next) {
@@ -2240,7 +2275,13 @@ int ODBConn::GetCatInfo(CATPARM *cap)
} // endif len
pval[n] = AllocateValue(g, crp->Type, len);
- buffer = pval[n]->GetTo_Val();
+
+ if (crp->Type == TYPE_STRING) {
+ pbuf[n] = (char*)PlugSubAlloc(g, NULL, len);
+ buffer = pbuf[n];
+ } else
+ buffer = pval[n]->GetTo_Val();
+
vl = vlen + n;
// n + 1 because column numbers begin with 1
@@ -2288,7 +2329,13 @@ int ODBConn::GetCatInfo(CATPARM *cap)
} // endif rc
for (n = 0, crp = qrp->Colresp; crp; n++, crp = crp->Next) {
- pval[n]->SetNull(vlen[n] == SQL_NULL_DATA);
+ if (vlen[n] == SQL_NULL_DATA)
+ pval[n]->SetNull(true);
+ else if (crp->Type == TYPE_STRING && vlen[n] != SQL_NULL_DATA)
+ pval[n]->SetValue_char(pbuf[n], vlen[n]);
+ else
+ pval[n]->SetNull(false);
+
crp->Kdata->SetValue(pval[n], i);
cap->Vlen[n][i] = vlen[n];
} // endfor crp
@@ -2343,31 +2390,103 @@ int ODBConn::GetCatInfo(CATPARM *cap)
} // end of GetCatInfo
/***********************************************************************/
+/* Allocate a CONNECT result structure from the ODBC result. */
+/***********************************************************************/
+PQRYRES ODBConn::AllocateResult(PGLOBAL g)
+ {
+ bool uns;
+ PODBCCOL colp;
+ PCOLRES *pcrp, crp;
+ PQRYRES qrp;
+
+ if (!m_Rows) {
+ strcpy(g->Message, "Void result");
+ return NULL;
+ } // endif m_Rows
+
+ /*********************************************************************/
+ /* Allocate the result storage for future retrieval. */
+ /*********************************************************************/
+ qrp = (PQRYRES)PlugSubAlloc(g, NULL, sizeof(QRYRES));
+ pcrp = &qrp->Colresp;
+ qrp->Continued = FALSE;
+ qrp->Truncated = FALSE;
+ qrp->Info = FALSE;
+ qrp->Suball = TRUE;
+ qrp->BadLines = 0;
+ qrp->Maxsize = m_Rows;
+ qrp->Maxres = m_Rows;
+ qrp->Nbcol = 0;
+ qrp->Nblin = 0;
+ qrp->Cursor = 0;
+
+ for (colp = (PODBCCOL)m_Tdb->Columns; colp;
+ colp = (PODBCCOL)colp->GetNext())
+ if (!colp->IsSpecial()) {
+ *pcrp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES));
+ crp = *pcrp;
+ pcrp = &crp->Next;
+ memset(crp, 0, sizeof(COLRES));
+ crp->Ncol = ++qrp->Nbcol;
+ crp->Name = colp->GetName();
+ crp->Type = colp->GetResultType();
+ crp->Prec = colp->GetScale();
+ crp->Length = colp->GetLength();
+ crp->Clen = colp->GetBuflen();
+ uns = colp->IsUnsigned();
+
+ if (!(crp->Kdata = AllocValBlock(g, NULL, crp->Type, m_Rows,
+ crp->Clen, 0, FALSE, TRUE, uns))) {
+ sprintf(g->Message, MSG(INV_RESULT_TYPE),
+ GetFormatType(crp->Type));
+ return NULL;
+ } // endif Kdata
+
+ if (!colp->IsNullable())
+ crp->Nulls = NULL;
+ else {
+ crp->Nulls = (char*)PlugSubAlloc(g, NULL, m_Rows);
+ memset(crp->Nulls, ' ', m_Rows);
+ } // endelse Nullable
+
+ colp->SetCrp(crp);
+ } // endif colp
+
+ *pcrp = NULL;
+//qrp->Nblin = n;
+ return qrp;
+ } // end of AllocateResult
+
+/***********************************************************************/
/* Restart from beginning of result set */
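+/* Returns -1 on error, 0 when the query was re-executed, else the   */
+/* number of rows already available (full result or rewind fetch).   */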
/***********************************************************************/
-bool ODBConn::Rewind(char *sql, ODBCCOL *tocols)
+int ODBConn::Rewind(char *sql, ODBCCOL *tocols)
{
- RETCODE rc;
+ int rc, rbuf = -1;
if (!m_hstmt)
- return false;
+ rbuf = -1;
+ else if (m_Full)
+ rbuf = m_Rows; // No need to "rewind"
+ else if (m_Scrollable) {
+ SQLULEN crow;
- if (m_Scrollable) {
try {
- rc = SQLFetchScroll(m_hstmt, SQL_FETCH_ABSOLUTE, 0);
+ rc = SQLExtendedFetch(m_hstmt, SQL_FETCH_FIRST, 1, &crow, NULL);
- if (rc != SQL_NO_DATA_FOUND)
- ThrowDBX(rc, "SQLFetchScroll", m_hstmt);
+ if (!Check(rc))
+ ThrowDBX(rc, "SQLExtendedFetch", m_hstmt);
+ rbuf = (int)crow;
} catch(DBX *x) {
strcpy(m_G->Message, x->GetErrorMessage(0));
- return true;
+ rbuf = -1;
} // end try/catch
- } else if (ExecDirectSQL(sql, tocols) < 0)
- return true;
+ } else if (ExecDirectSQL(sql, tocols) >= 0)
+ rbuf = 0;
- return false;
+ return rbuf;
} // end of Rewind
/***********************************************************************/
@@ -2382,7 +2501,7 @@ void ODBConn::Close()
rc = SQLFreeStmt(m_hstmt, SQL_DROP);
m_hstmt = NULL;
} // endif m_hstmt
-
+
if (m_hdbc != SQL_NULL_HDBC) {
if (m_Transact) {
rc = SQLEndTran(SQL_HANDLE_DBC, m_hdbc, SQL_COMMIT);
diff --git a/storage/connect/odbconn.h b/storage/connect/odbconn.h
index 1dd2aa2c16e..dfdb9fe7f56 100644
--- a/storage/connect/odbconn.h
+++ b/storage/connect/odbconn.h
@@ -33,10 +33,6 @@
typedef unsigned char *PUCHAR;
#endif // !WIN32
-// Timeout and net wait defaults
-#define DEFAULT_LOGIN_TIMEOUT 15 // seconds to before fail on connect
-#define DEFAULT_QUERY_TIMEOUT 15 // seconds to before fail waiting for results
-
// Field Flags, used to indicate status of fields
//efine SQL_FIELD_FLAG_DIRTY 0x1
//efine SQL_FIELD_FLAG_NULL 0x2
@@ -124,8 +120,9 @@ class ODBConn : public BLOCK {
forceOdbcDialog = 0x0010}; // Always display ODBC connect dialog
int Open(PSZ ConnectString, DWORD Options = 0);
- bool Rewind(char *sql, ODBCCOL *tocols);
+ int Rewind(char *sql, ODBCCOL *tocols);
void Close(void);
+ PQRYRES AllocateResult(PGLOBAL g);
// Attributes
public:
@@ -187,9 +184,12 @@ class ODBConn : public BLOCK {
DWORD m_UpdateOptions;
DWORD m_RowsetSize;
char m_IDQuoteChar[2];
- int m_Catver;
PSZ m_Connect;
+ int m_Catver;
+ int m_Rows;
bool m_Updatable;
bool m_Transact;
bool m_Scrollable;
+ bool m_First;
+ bool m_Full;
}; // end of ODBConn class definition
diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h
index bbbbc1486b6..e61a49ba9f9 100644
--- a/storage/connect/plgdbsem.h
+++ b/storage/connect/plgdbsem.h
@@ -74,9 +74,10 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */
TAB_PLG = 20, /* PLG NIY */
TAB_PIVOT = 21, /* PIVOT table */
TAB_VIR = 22, /* Virtual tables */
- TAB_JCT = 23, /* Junction tables NIY */
- TAB_DMY = 24, /* DMY Dummy tables NIY */
- TAB_NIY = 25}; /* Table not implemented yet */
+ TAB_JSON = 23, /* JSON tables */
+ TAB_JCT = 24, /* Junction tables NIY */
+ TAB_DMY = 25, /* DMY Dummy tables NIY */
+ TAB_NIY = 26}; /* Table not implemented yet */
enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */
TYPE_AM_ROWID = 1, /* ROWID type (special column) */
@@ -121,6 +122,8 @@ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */
TYPE_AM_BLK = 131, /* BLK access method type no */
TYPE_AM_ZIP = 132, /* ZIP access method type no */
TYPE_AM_ZLIB = 133, /* ZLIB access method type no */
+ TYPE_AM_JSON = 134, /* JSON access method type no */
+ TYPE_AM_JSN = 135, /* JSN access method type no */
TYPE_AM_MAC = 137, /* MAC table access method type */
TYPE_AM_WMI = 139, /* WMI table access method type */
TYPE_AM_XCL = 140, /* SYS column access method type */
diff --git a/storage/connect/rcmsg.c b/storage/connect/rcmsg.c
index abd74d169cc..9eea944c697 100644
--- a/storage/connect/rcmsg.c
+++ b/storage/connect/rcmsg.c
@@ -31,6 +31,8 @@ char *GetMsgid(int id)
{
char *p = NULL;
+ // This conditional until a real fix is found for MDEV-7304
+#if defined(FRENCH)
if (!stricmp(msglang(), "french"))
switch (id) {
#include "frids.h"
@@ -40,6 +42,7 @@ char *GetMsgid(int id)
} // endswitch(id)
else // English
+#endif // FRENCH
switch (id) {
#include "enids.h"
#if defined(NEWMSG)
diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp
index e469ae40f1f..51d777a7d17 100644
--- a/storage/connect/reldef.cpp
+++ b/storage/connect/reldef.cpp
@@ -202,6 +202,8 @@ TABDEF::TABDEF(void)
Degree = 0;
Pseudo = 0;
Read_Only = false;
+ m_data_charset = NULL;
+ csname = NULL;
} // end of TABDEF constructor
/***********************************************************************/
@@ -224,6 +226,7 @@ bool TABDEF::Define(PGLOBAL g, PCATLG cat, LPCSTR name, LPCSTR am)
m_data_charset= data_charset_name ?
get_charset_by_csname(data_charset_name, MY_CS_PRIMARY, 0):
NULL;
+ csname = GetStringCatInfo(g, "Table_charset", NULL);
// Get The column definitions
if ((poff = GetColCatInfo(g)) < 0)
diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h
index a1dfe87dca8..6160ea71680 100644
--- a/storage/connect/reldef.h
+++ b/storage/connect/reldef.h
@@ -68,6 +68,7 @@ class DllExport TABDEF : public RELDEF { /* Logical table descriptor */
friend class CATALOG;
friend class PLUGCAT;
friend class MYCAT;
+ friend class TDBASE;
public:
// Constructor
TABDEF(void); // Constructor
@@ -110,6 +111,7 @@ class DllExport TABDEF : public RELDEF { /* Logical table descriptor */
int Pseudo; /* Bit: 1 ROWID Ok, 2 FILEID Ok */
bool Read_Only; /* true for read only tables */
const CHARSET_INFO *m_data_charset;
+ const char *csname; /* Table charset name */
}; // end of TABDEF
/***********************************************************************/
diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp
index 0ef9625ac9b..ba22da52998 100644
--- a/storage/connect/tabdos.cpp
+++ b/storage/connect/tabdos.cpp
@@ -2211,7 +2211,8 @@ int TDBDOS::WriteDB(PGLOBAL g)
htrc("DOS WriteDB: R%d Mode=%d \n", Tdb_No, Mode);
// Make the line to write
- (void)PrepareWriting(g);
+ if (PrepareWriting(g))
+ return true;
if (trace > 1)
htrc("Write: line is='%s'\n", To_Line);
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
new file mode 100644
index 00000000000..a3c56965794
--- /dev/null
+++ b/storage/connect/tabjson.cpp
@@ -0,0 +1,1322 @@
+/************* tabjson C++ Program Source Code File (.CPP) *************/
+/* PROGRAM NAME: tabjson    Version 1.0                               */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2015 */
+/* This program implements the JSON class DB execution routines.      */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant sections of the MariaDB header file. */
+/***********************************************************************/
+#include <my_global.h>
+
+/***********************************************************************/
+/* Include application header files: */
+/* global.h is the header containing all global declarations.         */
+/* plgdbsem.h is the header containing the DB application declarations.*/
+/* tdbdos.h is the header containing the TDBDOS declarations.         */
+/* json.h is the header containing the JSON classes declarations.     */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+//#include "xtable.h"
+//#include "mycat.h" // for FNC_COL
+#include "maputil.h"
+#include "filamtxt.h"
+#include "tabdos.h"
+//#include "resource.h" // for IDS_COLUMNS
+#include "tabjson.h"
+#include "filamap.h"
+#if defined(ZIP_SUPPORT)
+#include "filamzip.h"
+#endif // ZIP_SUPPORT
+#include "tabmul.h"
+#include "checklvl.h"
+
+/***********************************************************************/
+/* External function. */
+/***********************************************************************/
+USETEMP UseTemp(void);
+
+/* -------------------------- Class JSONDEF -------------------------- */
+
+JSONDEF::JSONDEF(void)
+{
+ Jmode = MODE_OBJECT;
+ Objname = NULL;
+ Xcol = NULL;
+ Limit = 1;
+ ReadMode = 0;
+} // end of JSONDEF constructor
+
+/***********************************************************************/
+/* DefineAM: define specific AM block values. */
+/***********************************************************************/
+bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
+{
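+ // Jmode gives the JSON type of inserted rows, Object the path of the
+ // table object, Expand the name of the column to expand, Pretty the
+ // file layout (0 = one object per line, 1 = one array item per line,
+ // 2 = a general pretty-printed JSON file, the default) and Limit the
+ // maximum number of array values handled per column.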
+ Jmode = (JMODE)GetIntCatInfo("Jmode", MODE_OBJECT);
+ Objname = GetStringCatInfo(g, "Object", NULL);
+ Xcol = GetStringCatInfo(g, "Expand", NULL);
+ Pretty = GetIntCatInfo("Pretty", 2);
+ Limit = GetIntCatInfo("Limit", 10);
+ return DOSDEF::DefineAM(g, "DOS", poff);
+} // end of DefineAM
+
+/***********************************************************************/
+/* GetTable: makes a new Table Description Block. */
+/***********************************************************************/
+PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
+{
+ PTDBASE tdbp;
+ PTXF txfp = NULL;
+
+ // JSN not used for pretty=1 for insert or delete
+ if (!Pretty || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) {
+ USETEMP tmp = UseTemp();
+ bool map = Mapped && m != MODE_INSERT &&
+ !(tmp != TMP_NO && m == MODE_UPDATE) &&
+ !(tmp == TMP_FORCE &&
+ (m == MODE_UPDATE || m == MODE_DELETE));
+
+ if (Compressed) {
+#if defined(ZIP_SUPPORT)
+ if (Compressed == 1)
+ txfp = new(g) ZIPFAM(this);
+ else
+ txfp = new(g) ZLBFAM(this);
+#else // !ZIP_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+ return NULL;
+#endif // !ZIP_SUPPORT
+ } else if (map)
+ txfp = new(g) MAPFAM(this);
+ else
+ txfp = new(g) DOSFAM(this);
+
+ // Txfp must be set for TDBDOS
+ tdbp = new(g) TDBJSN(this, txfp);
+ } else {
+ txfp = new(g) DOSFAM(this);
+ tdbp = new(g) TDBJSON(this, txfp);
+ } // endif Pretty
+
+ if (Multiple)
+ tdbp = new(g) TDBMUL(tdbp);
+
+ return tdbp;
+} // end of GetTable
+
+/* --------------------------- Class TDBJSN -------------------------- */
+
+/***********************************************************************/
+/* Implementation of the TDBJSN class. */
+/***********************************************************************/
+TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
+ {
+ Row = NULL;
+ Colp = NULL;
+ Jmode = tdp->Jmode;
+ Xcol = tdp->Xcol;
+ Fpos = -1;
+ Spos = N = 0;
+ Limit = tdp->Limit;
+ Pretty = tdp->Pretty;
+ Strict = tdp->Strict;
+ NextSame = false;
+ Comma = false;
+ SameRow = 0;
+ Xval = -1;
+ } // end of TDBJSN standard constructor
+
+TDBJSN::TDBJSN(TDBJSN *tdbp) : TDBDOS(NULL, tdbp)
+ {
+ Row = tdbp->Row;
+ Colp = tdbp->Colp;
+ Jmode = tdbp->Jmode;
+ Xcol = tdbp->Xcol;
+ Fpos = tdbp->Fpos;
+ Spos = tdbp->Spos;
+ N = tdbp->N;
+ Limit = tdbp->Limit;
+ Pretty = tdbp->Pretty;
+ Strict = tdbp->Strict;
+ NextSame = tdbp->NextSame;
+ Comma = tdbp->Comma;
+ SameRow = tdbp->SameRow;
+ Xval = tdbp->Xval;
+ } // end of TDBJSN copy constructor
+
+// Used for update
+PTDB TDBJSN::CopyOne(PTABS t)
+ {
+ PTDB tp;
+ PJCOL cp1, cp2;
+ PGLOBAL g = t->G;
+
+ tp = new(g) TDBJSN(this);
+
+ for (cp1 = (PJCOL)Columns; cp1; cp1 = (PJCOL)cp1->GetNext()) {
+ cp2 = new(g) JSONCOL(cp1, tp); // Make a copy
+ NewPointer(t, cp1, cp2);
+ } // endfor cp1
+
+ return tp;
+ } // end of CopyOne
+
+/***********************************************************************/
+/* Allocate JSN column description block. */
+/***********************************************************************/
+PCOL TDBJSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
+ {
+ PJCOL colp = new(g) JSONCOL(g, cdp, this, cprec, n);
+
+ return (colp->ParseJpath(g)) ? NULL : colp;
+ } // end of MakeCol
+
+/***********************************************************************/
+/* InsertSpecialColumn: Put a special column ahead of the column list. */
+/***********************************************************************/
+PCOL TDBJSN::InsertSpecialColumn(PGLOBAL g, PCOL colp)
+ {
+ if (!colp->IsSpecial())
+ return NULL;
+
+//if (Xcol && ((SPCBLK*)colp)->GetRnm())
+// colp->SetKey(0); // Rownum is no more a key
+
+ colp->SetNext(Columns);
+ Columns = colp;
+ return colp;
+ } // end of InsertSpecialColumn
+
+/***********************************************************************/
+/* JSON Cardinality: returns table size in number of rows. */
+/***********************************************************************/
+int TDBJSN::Cardinality(PGLOBAL g)
+ {
+ if (!g)
+ return 0;
+ else if (Cardinal < 0)
+ Cardinal = TDBDOS::Cardinality(g);
+
+ return Cardinal;
+ } // end of Cardinality
+
+/***********************************************************************/
+/* JSON GetMaxSize: returns file size estimate in number of lines. */
+/***********************************************************************/
+int TDBJSN::GetMaxSize(PGLOBAL g)
+ {
+ if (MaxSize < 0)
+ MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1);
+
+ return MaxSize;
+ } // end of GetMaxSize
+
+/***********************************************************************/
+/* OpenDB: Data Base open routine for JSN access method. */
+/***********************************************************************/
+bool TDBJSN::OpenDB(PGLOBAL g)
+ {
+ if (Use == USE_OPEN) {
+ /*******************************************************************/
+ /* Table already open, replace it at its beginning. */
+ /*******************************************************************/
+ for (PJCOL cp = (PJCOL)Columns; cp; cp = (PJCOL)cp->GetNext()) {
+ cp->Nx = 0;
+ cp->Arp = NULL;
+ } // endfor cp
+
+ Fpos= -1;
+ Spos = 0;
+ NextSame = false;
+ SameRow = 0;
+ } else {
+ /*******************************************************************/
+ /* First opening. */
+ /*******************************************************************/
+ if (Mode == MODE_INSERT)
+ switch (Jmode) {
+ case MODE_OBJECT: Row = new(g) JOBJECT; break;
+ case MODE_ARRAY: Row = new(g) JARRAY; break;
+ case MODE_VALUE: Row = new(g) JVALUE; break;
+ default:
+ sprintf(g->Message, "Invalid Jmode %d", Jmode);
+ return true;
+ } // endswitch Jmode
+
+ } // endif Use
+
+ return TDBDOS::OpenDB(g);
+ } // end of OpenDB
+
+/***********************************************************************/
+/* SkipHeader: Physically skip first header line if applicable. */
+/* This is called from TDBDOS::OpenDB and must be executed before */
+/* Kindex construction if the file is accessed using an index. */
+/***********************************************************************/
+bool TDBJSN::SkipHeader(PGLOBAL g)
+ {
+ int len = GetFileLength(g);
+ bool rc = false;
+
+#if defined(_DEBUG)
+ if (len < 0)
+ return true;
+#endif // _DEBUG
+
+#if defined(WIN32)
+#define Ending 2
+#else // !WIN32
+#define Ending 1
+#endif // !WIN32
+
+ if (Pretty == 1) {
+ if (Mode == MODE_INSERT || Mode == MODE_DELETE) {
+ // Insert and Delete modes are no longer handled here
+ assert(false);
+ } else if (len) // !Insert && !Delete
+ rc = (Txfp->SkipRecord(g, false) == RC_FX || Txfp->RecordPos(g));
+
+ } // endif Pretty
+
+ return rc;
+ } // end of SkipHeader
+
+/***********************************************************************/
+/* ReadDB: Data Base read routine for JSN access method. */
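+/* When NextSame is set by an expanded column, the same JSON row is   */
+/* reused and SameRow indexes the next value of the expanded array.   */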
+/***********************************************************************/
+int TDBJSN::ReadDB(PGLOBAL g)
+ {
+ int rc;
+
+ N++;
+
+ if (NextSame) {
+ SameRow++;
+ return RC_OK;
+ } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK)
+ if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK)) {
+ // Deferred reading failed
+ } else if (!(Row = ParseJson(g, To_Line,
+ strlen(To_Line), Pretty, &Comma))) {
+ rc = (Pretty == 1 && !strcmp(To_Line, "]")) ? RC_EF : RC_FX;
+ } else {
+ SameRow = 0;
+ Fpos++;
+ rc = RC_OK;
+ } // endif's
+
+ return rc;
+ } // end of ReadDB
+
+/***********************************************************************/
+/* PrepareWriting: Prepare the line for WriteDB. */
+/***********************************************************************/
+ bool TDBJSN::PrepareWriting(PGLOBAL g)
+ {
+ PSZ s = Serialize(g, Row, NULL, Pretty);
+
+ if (s) {
+ if (Comma)
+ strcat(s, ",");
+
+ if ((signed)strlen(s) > Lrecl) {
+ sprintf(g->Message, "Line would be truncated (lrecl=%d)", Lrecl);
+ return true;
+ } else
+ strcpy(To_Line, s);
+
+ Row->Clear();
+ return false;
+ } else
+ return true;
+
+ } // end of PrepareWriting
+
+/* ----------------------------- JSNCOL ------------------------------- */
+
+/***********************************************************************/
+/* JSNCOL public constructor. */
+/***********************************************************************/
+JSONCOL::JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i)
+ : DOSCOL(g, cdp, tdbp, cprec, i, "DOS")
+ {
+ Tjp = (TDBJSN *)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp);
+ Arp = NULL;
+ Jpath = cdp->GetFmt();
+ MulVal = NULL;
+ Nodes = NULL;
+ Nod = Nx = 0;
+ Ival = -1;
+ Xpd = false;
+ Parsed = false;
+ } // end of JSONCOL constructor
+
+/***********************************************************************/
+/* JSONCOL constructor used for copying columns. */
+/* tdbp is the pointer to the new table descriptor. */
+/***********************************************************************/
+JSONCOL::JSONCOL(JSONCOL *col1, PTDB tdbp) : DOSCOL(col1, tdbp)
+ {
+ Tjp = col1->Tjp;
+ Arp = col1->Arp;
+ Jpath = col1->Jpath;
+ MulVal = col1->MulVal;
+ Nodes = col1->Nodes;
+ Nod = col1->Nod;
+ Ival = col1->Ival;
+ Nx = col1->Nx;
+ Xpd = col1->Xpd;
+ Parsed = col1->Parsed;
+ } // end of JSONCOL copy constructor
+
+/***********************************************************************/
+/* SetBuffer: prepare a column block for write operation. */
+/***********************************************************************/
+bool JSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
+ {
+ if (DOSCOL::SetBuffer(g, value, ok, check))
+ return true;
+
+ // Parse the json path
+ if (ParseJpath(g))
+ return true;
+
+ Tjp = (TDBJSN*)To_Tdb;
+ return false;
+ } // end of SetBuffer
+
+/***********************************************************************/
+/* Check whether this column can be expanded.                         */
+/***********************************************************************/
+bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
+ {
+ if (Tjp->Xcol && nm && !strcmp(nm, Tjp->Xcol) &&
+ (Tjp->Xval < 0 || Tjp->Xval == i)) {
+ Xpd = true; // Expandable object
+ Nodes[i].Op = OP_XX;
+ Tjp->Xval = i;
+ } else if (b) {
+ strcpy(g->Message, "Cannot expand more than one array");
+ return true;
+ } // endif Xcol
+
+ return false;
+ } // end of CheckExpand
+
+/***********************************************************************/
+/* Analyse array processing options. */
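+/* The bracketed part of a path node can be a number n (take the nth  */
+/* value), one of + * > < # ! x (sum, product, max, min, count,       */
+/* average, expand) or a quoted string used as a concat separator.    */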
+/***********************************************************************/
+bool JSONCOL::SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm)
+ {
+ int n = (int)strlen(p);
+ bool dg = true;
+ PJNODE jnp = &Nodes[i];
+
+ if (*p) {
+ if (p[--n] == ']') {
+ p[n--] = 0;
+ p++;
+ } else {
+ // Wrong array specification
+ sprintf(g->Message,
+ "Invalid array specification %s for %s", p, Name);
+ return true;
+ } // endif p
+
+ } // endif *p
+
+ // To check whether a numeric Rank was specified
+ for (int k = 0; dg && p[k]; k++)
+ dg = isdigit(p[k]) > 0;
+
+ if (!n) {
+ // Default specifications
+ if (CheckExpand(g, i, nm, false))
+ return true;
+ else if (jnp->Op != OP_XX)
+ if (!Value->IsTypeNum()) {
+ jnp->CncVal = AllocateValue(g, (void*)", ", TYPE_STRING);
+ jnp->Op = OP_CNC;
+ } else
+ jnp->Op = OP_ADD;
+
+ } else if (dg) {
+ if (atoi(p) > 0) {
+ // Return nth value
+ jnp->Rank = atoi(p);
+ jnp->Op = OP_EQ;
+ } else // Ignore array
+ jnp->Op = OP_NULL;
+
+ } else if (n == 1) {
+ // Set the Op value;
+ switch (*p) {
+ case '+': jnp->Op = OP_ADD; break;
+ case '*': jnp->Op = OP_MULT; break;
+ case '>': jnp->Op = OP_MAX; break;
+ case '<': jnp->Op = OP_MIN; break;
+ case '#': jnp->Op = OP_NUM; break;
+ case '!': jnp->Op = OP_SEP; break; // Average
+ case 'x':
+ case 'X': // Expand this array
+ if (!Tjp->Xcol && nm) {
+ Xpd = true;
+ jnp->Op = OP_XX;
+ Tjp->Xval = i;
+ Tjp->Xcol = nm;
+ } else if (CheckExpand(g, i, nm, true))
+ return true;
+
+ break;
+ default:
+ sprintf(g->Message,
+ "Invalid function specification %c for %s", *p, Name);
+ return true;
+ } // endswitch *p
+
+ } else if (*p == '"' && p[n - 1] == '"') {
+ // This is a concat specification
+ jnp->Op = OP_CNC;
+
+ if (n > 2) {
+ // Set concat intermediate string
+ p[n - 1] = 0;
+ jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING);
+ } // endif n
+
+ } else {
+ sprintf(g->Message, "Wrong array specification for %s", Name);
+ return true;
+ } // endif's
+
+ return false;
+ } // end of SetArrayOptions
+
+/***********************************************************************/
+/* Parse the Jpath information, if any.                               */
+/* This information can be specified in the Fieldfmt column option    */
+/* when creating the table. It indicates the position of the node     */
+/* corresponding to that column.                                      */
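+/* For example (assumed syntax) FIELD_FORMAT='AUTHOR:[1]:FIRSTNAME'   */
+/* would read the FIRSTNAME key of the first AUTHOR array element.    */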
+/***********************************************************************/
+bool JSONCOL::ParseJpath(PGLOBAL g)
+ {
+ char *p, *p2 = NULL, *pbuf = NULL;
+ int i;
+ bool mul = false;
+
+ if (Parsed)
+ return false; // Already done
+ else if (InitValue(g))
+ return true;
+ else if (!Jpath)
+ Jpath = Name;
+
+ pbuf = (char*)PlugSubAlloc(g, NULL, strlen(Jpath) + 1);
+ strcpy(pbuf, Jpath);
+
+ // The Jpath must be analyzed
+ for (i = 0, p = pbuf; (p = strchr(p, ':')); i++, p++)
+ Nod++; // One path node found
+
+ Nodes = (PJNODE)PlugSubAlloc(g, NULL, (++Nod) * sizeof(JNODE));
+ memset(Nodes, 0, (Nod) * sizeof(JNODE));
+
+ // Analyze the Jpath for this column
+ for (i = 0, p = pbuf; i < Nod; i++, p = (p2 ? p2 + 1 : p + strlen(p))) {
+ if ((p2 = strchr(p, ':')))
+ *p2 = 0;
+
+ // Jpath must be explicit
+ if (*p == 0 || *p == '[') {
+ // Analyse intermediate array processing
+ if (SetArrayOptions(g, p, i, Nodes[i-1].Key))
+ return true;
+
+ } else {
+ Nodes[i].Key = p;
+ Nodes[i].Op = OP_EXIST;
+ } // endif's
+
+ } // endfor i, p
+
+ MulVal = AllocateValue(g, Value);
+ Parsed = true;
+ return false;
+ } // end of ParseJpath
+
+/***********************************************************************/
+/* SetJsonValue: Set a value from the content of a JVALUE.            */
+/***********************************************************************/
+void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
+ {
+ if (val) {
+ if (Nodes[n].Op == OP_NUM)
+ vp->SetValue(1);
+ else {
+ again:
+ switch (val->GetValType()) {
+ case TYPE_STRG:
+ case TYPE_INTG:
+ case TYPE_DBL:
+ vp->SetValue_pval(val->GetValue());
+ break;
+ case TYPE_BOOL:
+ if (vp->IsTypeNum())
+ vp->SetValue(val->GetInteger() ? 1 : 0);
+ else
+ vp->SetValue_psz((PSZ)(val->GetInteger() ? "true" : "false"));
+
+ break;
+ case TYPE_JAR:
+ val = val->GetArray()->GetValue(0);
+ goto again;
+ case TYPE_JOB:
+ if (!vp->IsTypeNum()) {
+ vp->SetValue_psz(val->GetObject()->GetText(g));
+ break;
+ } // endif Type
+
+ default:
+ vp->Reset();
+ } // endswitch Type
+
+ } // endelse
+
+ } else
+ vp->Reset();
+
+ } // end of SetJsonValue
+
+/***********************************************************************/
+/* GetRow: Get the object containing this column. */
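+/* mode is 0 for a normal read, 1 for a write (missing nodes are then */
+/* constructed) and 2 when re-entered to get a next multiple value.   */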
+/***********************************************************************/
+PJSON JSONCOL::GetRow(PGLOBAL g, int mode)
+ {
+ PJVAL val;
+ PJAR arp;
+ PJSON nwr, row = Tjp->Row;
+
+ for (int i = 0; i < Nod-1 && row; i++) {
+ switch (row->GetType()) {
+ case TYPE_JOB:
+ if (!Nodes[i].Key)
+ // Expected Array was not there
+ continue;
+
+ val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ break;
+ case TYPE_JAR:
+ if (!Nodes[i].Key) {
+ if (Nodes[i].Op != OP_NULL) {
+ Ival = i;
+ arp = (PJAR)row;
+
+ if (mode < 2) // First pass
+ Arp = arp;
+
+ if (Nodes[i].Op != OP_XX) {
+ if (Nodes[i].Rank)
+ val = arp->GetValue(Nodes[i].Rank - 1);
+ else
+ val = arp->GetValue(arp == Arp ? Nx : 0);
+
+ } else
+ val = arp->GetValue(Tjp->SameRow);
+
+ } else
+ val = NULL;
+
+ } else {
+ strcpy(g->Message, "Unexpected array");
+ val = NULL; // Not an expected array
+ } // endif Nodes
+
+ break;
+ case TYPE_JVAL:
+ val = (PJVAL)row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->GetType());
+ val = NULL;
+ } // endswitch Type
+
+ if (val) {
+ row = val->GetJson();
+ } else if (mode == 1) { // mode write
+ // Construct missing objects
+ for (i++; row && i < Nod; i++) {
+ if (!Nodes[i].Key) {
+ // Construct intermediate array
+ nwr = new(g) JARRAY;
+ } else {
+ nwr = new(g) JOBJECT;
+ } // endif Nodes
+
+ if (row->GetType() == TYPE_JOB) {
+ ((PJOB)row)->SetValue(g, new(g) JVALUE(nwr), Nodes[i-1].Key);
+ } else if (row->GetType() == TYPE_JAR) {
+ ((PJAR)row)->AddValue(g, new(g) JVALUE(nwr));
+ ((PJAR)row)->InitArray(g);
+ } else {
+ strcpy(g->Message, "Wrong type when writing new row");
+ nwr = NULL;
+ } // endif's
+
+ row = nwr;
+ } // endfor i
+
+ break;
+ } else
+ row = NULL;
+
+ } // endfor i
+
+ return row;
+ } // end of GetRow
+
+/***********************************************************************/
+/* ReadColumn: */
+/***********************************************************************/
+void JSONCOL::ReadColumn(PGLOBAL g)
+ {
+ int mode = 0, n = Nod - 1;
+ PJSON row;
+ PJVAL val = NULL;
+
+ evenmore:
+ row = GetRow(g, mode);
+
+ more:
+ if (row) switch (row->GetType()) {
+ case TYPE_JOB:
+ if (Nodes[n].Key)
+ val = row->GetValue(Nodes[n].Key);
+ else
+ val = new(g) JVALUE(row);
+
+ break;
+ case TYPE_JAR:
+ // Multiple column ?
+ if (Nodes[n].Op != OP_NULL) {
+ Arp = (PJAR)row;
+ val = Arp->GetValue(Nodes[n].Rank > 0 ?
+ Nodes[n].Rank - 1 :
+ Nodes[n].Op == OP_XX ? Tjp->SameRow : Nx);
+ Ival = n;
+ } else
+ val = NULL;
+
+ break;
+ case TYPE_JVAL:
+ val = (PJVAL)row;
+ break;
+ default:
+ sprintf(g->Message, "Wrong return value type %d", row->GetType());
+ Value->Reset();
+ return;
+ } // endswitch Type
+
+ if (!Nx /*|| (Xpd)*/)
+ SetJsonValue(g, Value, val, n);
+
+ if (Arp) {
+ // Multiple column
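+ // ars is the number of array values to process (combine or expand)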
+ int ars = (Nodes[Ival].Rank > 0) ? 1 : MY_MIN(Tjp->Limit, Arp->size());
+
+ if (Nodes[Ival].Op == OP_XX) {
+ if (ars > Tjp->SameRow + 1)
+ Tjp->NextSame = true; // More to come
+ else {
+ Tjp->NextSame = false;
+ Arp = NULL;
+ } // endelse
+
+ } else {
+ if (Nx && val) {
+ SetJsonValue(g, MulVal, val, Ival);
+
+ if (!MulVal->IsZero()) {
+ PVAL val[2];
+ bool err;
+
+ switch (Nodes[Ival].Op) {
+ case OP_CNC:
+ if (Nodes[Ival].CncVal) {
+ val[0] = Nodes[Ival].CncVal;
+ err = Value->Compute(g, val, 1, Nodes[Ival].Op);
+ } // endif CncVal
+
+ val[0] = MulVal;
+ err = Value->Compute(g, val, 1, Nodes[Ival].Op);
+ break;
+ case OP_NUM:
+ case OP_SEP:
+ val[0] = Value;
+ val[1] = MulVal;
+ err = Value->Compute(g, val, 2, OP_ADD);
+ break;
+ default:
+ val[0] = Value;
+ val[1] = MulVal;
+ err = Value->Compute(g, val, 2, Nodes[Ival].Op);
+ } // endswitch Op
+
+ if (err)
+ Value->Reset();
+
+ } // endif Zero
+
+ } // endif Nx
+
+ if (ars > ++Nx) {
+ if (Ival != n) {
+ mode = 2;
+ goto evenmore;
+ } else
+ goto more;
+
+ } else {
+ if (Nodes[Ival].Op == OP_SEP) {
+ // Calculate average
+ PVAL val[2];
+
+ MulVal->SetValue(ars);
+ val[0] = Value;
+ val[1] = MulVal;
+
+ if (Value->Compute(g, val, 2, OP_DIV))
+ Value->Reset();
+
+ } // endif Op
+
+ Arp = NULL;
+ Nx = 0;
+ } // endif ars
+
+ } // endif Op
+
+ } // endif Arp
+
+ } // end of ReadColumn
+
+/***********************************************************************/
+/* WriteColumn: */
+/***********************************************************************/
+void JSONCOL::WriteColumn(PGLOBAL g)
+ {
+ /*********************************************************************/
+ /* Check whether this node must be written. */
+ /*********************************************************************/
+ if (Value != To_Val)
+ Value->SetValue_pval(To_Val, FALSE); // Convert the updated value
+
+ /*********************************************************************/
+ /* On INSERT Null values are represented by no node. */
+ /*********************************************************************/
+ if (Value->IsNull() && Tjp->Mode == MODE_INSERT)
+ return;
+
+ PJOB objp = NULL;
+ PJAR arp = NULL;
+ PJVAL jvp = NULL;
+ PJSON row = GetRow(g, 1);
+ JTYP type = row->GetType();
+
+ switch (row->GetType()) {
+ case TYPE_JOB: objp = (PJOB)row; break;
+ case TYPE_JAR: arp = (PJAR)row; break;
+ case TYPE_JVAL: jvp = (PJVAL)row; break;
+ default: row = NULL; // ???????????????????????????
+ } // endswitch Type
+
+ if (row) switch (Buf_Type) {
+ case TYPE_STRING:
+ case TYPE_DATE:
+ case TYPE_INT:
+ case TYPE_DOUBLE:
+ if (arp) {
+ if (Nodes[Nod-1].Rank)
+ arp->SetValue(g, new(g) JVALUE(g, Value), Nodes[Nod-1].Rank-1);
+ else
+ arp->AddValue(g, new(g) JVALUE(g, Value));
+
+ arp->InitArray(g);
+ } else if (objp) {
+ if (Nodes[Nod-1].Key)
+ objp->SetValue(g, new(g) JVALUE(g, Value), Nodes[Nod-1].Key);
+
+ } else if (jvp)
+ jvp->SetValue(Value);
+
+ break;
+ default: // ??????????
+ sprintf(g->Message, "Invalid column type %d", Buf_Type);
+ } // endswitch Type
+
+ } // end of WriteColumn
+
+/* -------------------------- Class TDBJSON -------------------------- */
+
+/***********************************************************************/
+/* Implementation of the TDBJSON class. */
+/***********************************************************************/
+TDBJSON::TDBJSON(PJDEF tdp, PTXF txfp) : TDBJSN(tdp, txfp)
+ {
+ Top = NULL;
+ Doc = NULL;
+ Objname = tdp->Objname;
+ Multiple = tdp->Multiple;
+ Done = Changed = false;
+ } // end of TDBJSON standard constructor
+
+TDBJSON::TDBJSON(PJTDB tdbp) : TDBJSN(tdbp)
+ {
+ Top = tdbp->Top;
+ Doc = tdbp->Doc;
+ Objname = tdbp->Objname;
+ Multiple = tdbp->Multiple;
+ Done = tdbp->Done;
+ Changed = tdbp->Changed;
+ } // end of TDBJSON copy constructor
+
+// Used for update
+PTDB TDBJSON::CopyOne(PTABS t)
+ {
+ PTDB tp;
+ PJCOL cp1, cp2;
+ PGLOBAL g = t->G;
+
+ tp = new(g) TDBJSON(this);
+
+ for (cp1 = (PJCOL)Columns; cp1; cp1 = (PJCOL)cp1->GetNext()) {
+ cp2 = new(g) JSONCOL(cp1, tp); // Make a copy
+ NewPointer(t, cp1, cp2);
+ } // endfor cp1
+
+ return tp;
+ } // end of CopyOne
+
+/***********************************************************************/
+/* Make the document tree from a file. */
+/***********************************************************************/
+int TDBJSON::MakeNewDoc(PGLOBAL g)
+ {
+ // Create a void table that will be populated
+ Doc = new(g) JARRAY;
+
+ if (Objname) {
+ // Parse and allocate Objname item(s)
+ char *p;
+ char *objpath = (char*)PlugSubAlloc(g, NULL, strlen(Objname)+1);
+ int i;
+ PJOB objp;
+ PJAR arp;
+ PJVAL val = NULL;
+
+ strcpy(objpath, Objname);
+ Top = NULL;
+
+ for (; objpath; objpath = p) {
+ if ((p = strchr(objpath, ':')))
+ *p++ = 0;
+
+ if (*objpath != '[') {
+ objp = new(g) JOBJECT;
+
+ if (!Top)
+ Top = objp;
+
+ if (val)
+ val->SetValue(objp);
+
+ val = new(g) JVALUE;
+ objp->SetValue(g, val, objpath);
+ } else if (objpath[strlen(objpath)-1] == ']') {
+ arp = new(g) JARRAY;
+
+ if (!Top)
+ Top = arp;
+
+ if (val)
+ val->SetValue(arp);
+
+ val = new(g) JVALUE;
+ i = atoi(objpath+1) - 1;
+ arp->SetValue(g, val, i);
+ arp->InitArray(g);
+ } else {
+ sprintf(g->Message, "Invalid Table path %s", Objname);
+ return RC_FX;
+ } // endif objpath
+
+ } // endfor p
+
+ val->SetValue(Doc);
+ } else
+ Top = Doc;
+
+ return RC_OK;
+ } // end of MakeNewDoc
+
+/***********************************************************************/
+/* Make the document tree from a file. */
+/***********************************************************************/
+int TDBJSON::MakeDocument(PGLOBAL g)
+ {
+ char *p, *memory, *objpath, *key, filename[_MAX_PATH];
+ int len, i = 0;
+ HANDLE hFile;
+ MEMMAP mm;
+ PJSON jsp;
+ PJOB objp = NULL;
+ PJAR arp = NULL;
+ PJVAL val = NULL;
+
+ if (Done)
+ return RC_OK;
+ else
+ Done = true;
+
+ // Now open the JSON file
+ PlugSetPath(filename, Txfp->To_File, GetPath());
+
+ /*********************************************************************/
+ /* Create the mapping file object. */
+ /*********************************************************************/
+ hFile = CreateFileMap(g, filename, &mm, MODE_READ, false);
+
+ if (hFile == INVALID_HANDLE_VALUE) {
+ DWORD drc = GetLastError();
+
+ if (drc != ENOENT || Mode != MODE_INSERT) {
+ if (!(*g->Message))
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR),
+ "map", (int)drc, filename);
+
+ return RC_FX;
+ } else
+ return MakeNewDoc(g);
+
+ } // endif hFile
+
+ /*********************************************************************/
+ /* Get the file size (assuming file is smaller than 4 GB) */
+ /*********************************************************************/
+ len = mm.lenL;
+ memory = (char *)mm.memory;
+
+ if (!len) { // Empty file
+ CloseFileHandle(hFile);
+ CloseMemMap(memory, len);
+
+ if (Mode == MODE_INSERT)
+ return MakeNewDoc(g);
+
+ } // endif len
+
+ if (!memory) {
+ CloseFileHandle(hFile);
+ sprintf(g->Message, MSG(MAP_VIEW_ERROR), filename, GetLastError());
+ return RC_FX;
+ } // endif Memory
+
+ CloseFileHandle(hFile); // Not used anymore
+ hFile = INVALID_HANDLE_VALUE; // For Fblock
+
+ /*********************************************************************/
+ /* Parse the json file and allocate its tree structure. */
+ /*********************************************************************/
+ g->Message[0] = 0;
+ jsp = Top = ParseJson(g, memory, len, Pretty);
+ CloseMemMap(memory, len);
+
+ if (!jsp && g->Message[0])
+ return RC_FX;
+
+ if (Objname) {
+ objpath = (char*)PlugSubAlloc(g, NULL, strlen(Objname) + 1);
+ strcpy(objpath, Objname);
+ } else
+ objpath = NULL;
+
+ /*********************************************************************/
+ /* Find the table in the tree structure. */
+ /*********************************************************************/
+ for (; jsp && objpath; objpath = p) {
+ if ((p = strchr(objpath, ':')))
+ *p++ = 0;
+
+ if (*objpath != '[') { // objpath is a key
+ if (jsp->GetType() != TYPE_JOB) {
+ strcpy(g->Message, "Table path does no match json file");
+ return RC_FX;
+ } // endif Type
+
+ key = objpath;
+ objp = jsp->GetObject();
+ arp = NULL;
+ val = objp->GetValue(key);
+
+ if (!val || !(jsp = val->GetJson())) {
+ sprintf(g->Message, "Cannot find object key %s", key);
+ return RC_FX;
+ } // endif val
+
+ } else if (objpath[strlen(objpath)-1] == ']') {
+ if (jsp->GetType() != TYPE_JAR) {
+ strcpy(g->Message, "Table path does no match json file");
+ return RC_FX;
+ } // endif Type
+
+ arp = jsp->GetArray();
+ objp = NULL;
+ i = atoi(objpath+1) - 1;
+ val = arp->GetValue(i);
+
+ if (!val) {
+ sprintf(g->Message, "Cannot find array value %d", i);
+ return RC_FX;
+ } // endif val
+
+ } else {
+ sprintf(g->Message, "Invalid Table path %s", Objname);
+ return RC_FX;
+ } // endif objpath
+
+ jsp = val->GetJson();
+ } // endfor objpath
+
+ if (jsp && jsp->GetType() == TYPE_JAR)
+ Doc = jsp->GetArray();
+ else {
+ // The table is void or is just one object or one value
+ Doc = new(g) JARRAY;
+
+ if (val) {
+ Doc->AddValue(g, val);
+ Doc->InitArray(g);
+ } else if (jsp) {
+ Doc->AddValue(g, new(g) JVALUE(jsp));
+ Doc->InitArray(g);
+ } // endif val
+
+ if (objp)
+ objp->SetValue(g, new(g) JVALUE(Doc), key);
+ else if (arp)
+ arp->SetValue(g, new(g) JVALUE(Doc), i);
+ else
+ Top = Doc;
+
+ } // endif jsp
+
+ return RC_OK;
+ } // end of MakeDocument
+
+/***********************************************************************/
+/* JSON Cardinality: returns table size in number of rows. */
+/***********************************************************************/
+int TDBJSON::Cardinality(PGLOBAL g)
+ {
+ if (!g)
+ return (Xcol || Multiple) ? 0 : 1;
+ else if (Cardinal < 0)
+ if (!Multiple) {
+ if (MakeDocument(g) == RC_OK)
+ Cardinal = Doc->size();
+
+ } else
+ return 10;
+
+ return Cardinal;
+ } // end of Cardinality
+
+/***********************************************************************/
+/* JSON GetMaxSize: returns table size estimate in number of rows. */
+/***********************************************************************/
+int TDBJSON::GetMaxSize(PGLOBAL g)
+ {
+ if (MaxSize < 0)
+ MaxSize = Cardinality(g) * ((Xcol) ? Limit : 1);
+
+ return MaxSize;
+ } // end of GetMaxSize
+
+/***********************************************************************/
+/* ResetSize: called by TDBMUL when calculating the size estimate.    */
+/***********************************************************************/
+void TDBJSON::ResetSize(void)
+ {
+ MaxSize = Cardinal = -1;
+ Fpos = -1;
+ N = 0;
+ Done = false;
+ } // end of ResetSize
+
+/***********************************************************************/
+/* TDBJSON is not indexable. */
+/***********************************************************************/
+int TDBJSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add)
+ {
+ if (pxdf) {
+ strcpy(g->Message, "JSON not indexable when pretty = 2");
+ return RC_FX;
+ } else
+ return RC_OK;
+
+ } // end of MakeIndex
+
+/***********************************************************************/
+/* JSON Access Method opening routine. */
+/***********************************************************************/
+bool TDBJSON::OpenDB(PGLOBAL g)
+ {
+ if (Use == USE_OPEN) {
+ /*******************************************************************/
+ /* Table already open, replace it at its beginning. */
+ /*******************************************************************/
+ for (PJCOL cp = (PJCOL)Columns; cp; cp = (PJCOL)cp->GetNext()) {
+ cp->Nx = 0;
+ cp->Arp = NULL;
+ } // endfor cp
+
+ Fpos= -1;
+ Spos = 0;
+ NextSame = false;
+ SameRow = 0;
+ return false;
+ } // endif use
+
+ /*********************************************************************/
+ /* OpenDB: initialize the JSON file processing. */
+ /*********************************************************************/
+ if (MakeDocument(g) != RC_OK)
+ return true;
+
+ if (Mode == MODE_INSERT)
+ switch (Jmode) {
+ case MODE_OBJECT: Row = new(g) JOBJECT; break;
+ case MODE_ARRAY: Row = new(g) JARRAY; break;
+ case MODE_VALUE: Row = new(g) JVALUE; break;
+ default:
+ sprintf(g->Message, "Invalid Jmode %d", Jmode);
+ return true;
+ } // endswitch Jmode
+
+ Use = USE_OPEN;
+ return false;
+ } // end of OpenDB
+
+/***********************************************************************/
+/* ReadDB: Data Base read routine for JSON access method. */
+/***********************************************************************/
+int TDBJSON::ReadDB(PGLOBAL g)
+ {
+ int rc;
+
+ N++;
+
+ if (NextSame) {
+ SameRow++;
+ rc = RC_OK;
+ } else if (++Fpos < (signed)Doc->size()) {
+ Row = Doc->GetValue(Fpos);
+
+ if (Row->GetType() == TYPE_JVAL)
+ Row = ((PJVAL)Row)->GetJson();
+
+ SameRow = 0;
+ rc = RC_OK;
+ } else
+ rc = RC_EF;
+
+ return rc;
+ } // end of ReadDB
+
+/***********************************************************************/
+/* WriteDB: Data Base write routine for JSON access method. */
+/***********************************************************************/
+int TDBJSON::WriteDB(PGLOBAL g)
+ {
+ if (Jmode == MODE_OBJECT) {
+ PJVAL vp = new(g) JVALUE(Row);
+
+ if (Mode == MODE_INSERT) {
+ Doc->AddValue(g, vp);
+ Row = new(g) JOBJECT;
+ } else if (Doc->SetValue(g, vp, Fpos))
+ return RC_FX;
+
+ } else if (Jmode == MODE_ARRAY) {
+ PJVAL vp = new(g) JVALUE(Row);
+
+ if (Mode == MODE_INSERT) {
+ Doc->AddValue(g, vp);
+ Row = new(g) JARRAY;
+ } else if (Doc->SetValue(g, vp, Fpos))
+ return RC_FX;
+
+ } else { // if (Jmode == MODE_VALUE)
+ if (Mode == MODE_INSERT)
+ Doc->AddValue(g, (PJVAL)Row);
+ else if (Doc->SetValue(g, (PJVAL)Row, Fpos))
+ return RC_FX;
+
+ } // endif Jmode
+
+ Changed = true;
+ return RC_OK;
+ } // end of WriteDB
+
+/***********************************************************************/
+/* Data Base delete line routine for JSON access method. */
+/***********************************************************************/
+int TDBJSON::DeleteDB(PGLOBAL g, int irc)
+ {
+ if (irc == RC_OK) {
+ // Deleted current row
+ if (Doc->DeleteValue(Fpos)) {
+ sprintf(g->Message, "Value %d does not exist", Fpos + 1);
+ return RC_FX;
+ } // endif Delete
+
+ Changed = true;
+ } else if (irc == RC_FX)
+ // Delete all
+ for (int i = 0; i < Doc->size(); i++) {
+ Doc->DeleteValue(i);
+ Changed = true;
+ } // endfor i
+
+ return RC_OK;
+ } // end of DeleteDB
+
+/***********************************************************************/
+/* Data Base close routine for JSON access methods. */
+/***********************************************************************/
+void TDBJSON::CloseDB(PGLOBAL g)
+ {
+ if (!Changed)
+ return;
+
+ // Save the modified document
+ char filename[_MAX_PATH];
+ PSZ msg;
+ FILE *fop;
+
+ Doc->InitArray(g);
+
+ // We use the file name relative to the recorded datapath
+ PlugSetPath(filename, ((PJDEF)To_Def)->Fn, GetPath());
+
+ // Serialize the modified table
+ if (!(fop = fopen(filename, "wb"))) {
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR),
+ "w", (int)errno, filename);
+ strcat(strcat(g->Message, ": "), strerror(errno));
+ } else if ((msg = Serialize(g, Top, fop, Pretty)))
+ puts(msg);
+
+ } // end of CloseDB
+
+/* -------------------------- End of json --------------------------- */
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
new file mode 100644
index 00000000000..68f79a1526a
--- /dev/null
+++ b/storage/connect/tabjson.h
@@ -0,0 +1,197 @@
+/*************** tabjson H Declares Source Code File (.H) **************/
+/* Name: tabjson.h Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2015 */
+/* */
+/* This file contains the JSON classes declarations.                  */
+/***********************************************************************/
+#include "osutil.h"
+#include "block.h"
+#include "colblk.h"
+#include "json.h"
+
+enum JMODE {MODE_OBJECT, MODE_ARRAY, MODE_VALUE};
+
+typedef class JSONDEF *PJDEF;
+typedef class TDBJSON *PJTDB;
+typedef class JSONCOL *PJCOL;
+
+class TDBJSN;
+
+/***********************************************************************/
+/* The JSON path node. Describes an Object key or an Array step.      */
+/***********************************************************************/
+typedef struct _jnode {
+ PSZ Key; // The key used for object
+ OPVAL Op; // Operator used for this node
+ PVAL CncVal; // Concat separator value used for OP_CNC
+ int Rank; // The rank in array
+} JNODE, *PJNODE;
+
+/***********************************************************************/
+/* JSON table. */
+/***********************************************************************/
+class JSONDEF : public DOSDEF { /* Table description */
+ friend class TDBJSON;
+ friend class TDBJSN;
+ public:
+ // Constructor
+ JSONDEF(void);
+
+ // Implementation
+ virtual const char *GetType(void) {return "JSON";}
+
+ // Methods
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+ virtual PTDB GetTable(PGLOBAL g, MODE m);
+
+ protected:
+ // Members
+ JMODE Jmode; /* MODE_OBJECT by default */
+ char *Objname; /* Name of first level object */
+ char *Xcol; /* Name of expandable column */
+ int Limit; /* Limit of multiple values */
+ int Pretty; /* Depends on file structure */
+ bool Strict; /* Strict syntax checking */
+ }; // end of JSONDEF
+
+/* -------------------------- TDBJSN class --------------------------- */
+
+/***********************************************************************/
+/* This is the JSN Access Method class declaration. */
+/* The table is a DOS file, each record being a JSON object. */
+/***********************************************************************/
+class TDBJSN : public TDBDOS {
+ friend class JSONCOL;
+ public:
+ // Constructor
+ TDBJSN(PJDEF tdp, PTXF txfp);
+ TDBJSN(TDBJSN *tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_JSN;}
+ virtual bool SkipHeader(PGLOBAL g);
+ virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBJSN(this);}
+
+ // Methods
+ virtual PTDB CopyOne(PTABS t);
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual PCOL InsertSpecialColumn(PGLOBAL g, PCOL colp);
+ virtual int RowNumber(PGLOBAL g, BOOL b = FALSE)
+ {return (b) ? N : Fpos + 1;}
+
+ // Database routines
+ virtual int Cardinality(PGLOBAL g);
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual bool PrepareWriting(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+
+ protected:
+ // Members
+ PJSON Row; // The current row
+ PJCOL Colp; // The multiple column
+ JMODE Jmode; // MODE_OBJECT by default
+ char *Xcol; // Name of expandable column
+ int Fpos; // The current row index
+ int Spos; // DELETE start index
+ int N; // The current Rownum
+ int Limit; // Limit of multiple values
+ int Pretty; // Depends on file structure
+ bool Strict; // Strict syntax checking
+ bool NextSame; // Same next row
+ bool Comma; // Row has final comma
+ int SameRow; // Same row nb
+ int Xval; // Index of expandable array
+ }; // end of class TDBJSN
+
+/* -------------------------- JSONCOL class -------------------------- */
+
+/***********************************************************************/
+/* Class JSONCOL: JSON access method column descriptor. */
+/***********************************************************************/
+class JSONCOL : public DOSCOL {
+ friend class TDBJSN;
+ friend class TDBJSON;
+ public:
+ // Constructors
+ JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i);
+ JSONCOL(JSONCOL *colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+ virtual int GetAmType(void) {return Tjp->GetAmType();}
+
+ // Methods
+ virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ bool ParseJpath(PGLOBAL g);
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
+
+ protected:
+ bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b);
+ bool SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm);
+ PJSON GetRow(PGLOBAL g, int mode);
+ void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n);
+
+ // Default constructor not to be used
+ JSONCOL(void) {}
+
+ // Members
+ TDBJSN *Tjp; // To the JSN table block
+ PVAL MulVal; // To value used by multiple column
+ PJAR Arp; // The intermediate array
+ char *Jpath; // The json path
+ JNODE *Nodes; // The intermediate objects
+ int Nod; // The number of intermediate objects
+ int Ival; // Index of multiple values
+ int Nx; // The last read sub-row
+ bool Xpd; // True for expandable column
+ bool Parsed; // True when parsed
+ }; // end of class JSONCOL
+
+/* -------------------------- TDBJSON class -------------------------- */
+
+/***********************************************************************/
+/* This is the JSON Access Method class declaration. */
+/***********************************************************************/
+class TDBJSON : public TDBJSN {
+ friend class JSONCOL;
+ public:
+ // Constructor
+ TDBJSON(PJDEF tdp, PTXF txfp);
+ TDBJSON(PJTDB tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_JSON;}
+ virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBJSON(this);}
+
+ // Methods
+ virtual PTDB CopyOne(PTABS t);
+
+ // Database routines
+ virtual int Cardinality(PGLOBAL g);
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual void ResetSize(void);
+ virtual int GetRecpos(void) {return Fpos;}
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual bool PrepareWriting(PGLOBAL g) {return false;}
+ virtual int WriteDB(PGLOBAL g);
+ virtual int DeleteDB(PGLOBAL g, int irc);
+ virtual void CloseDB(PGLOBAL g);
+
+ // Optimization routines
+ virtual int MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add);
+
+ protected:
+ int MakeNewDoc(PGLOBAL g);
+ int MakeDocument(PGLOBAL g);
+
+ // Members
+ PJSON Top; // The file JSON tree
+ PJAR Doc; // The document array
+ char *Objname; // The table object name
+ int Multiple; // 0: No 1: DIR 2: Section 3: filelist
+ bool Done; // True when document parsing is done
+ bool Changed; // After Update, Insert or Delete
+ }; // end of class TDBJSON
diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp
index b093e2102c2..cbd5910d8c8 100644
--- a/storage/connect/table.cpp
+++ b/storage/connect/table.cpp
@@ -146,6 +146,7 @@ TDBASE::TDBASE(PTABDEF tdp) : TDB(tdp)
Knum = 0;
Read_Only = (tdp) ? tdp->IsReadOnly() : false;
m_data_charset= (tdp) ? tdp->data_charset() : NULL;
+ csname = (tdp) ? tdp->csname : NULL;
} // end of TDBASE constructor
TDBASE::TDBASE(PTDBASE tdbp) : TDB(tdbp)
@@ -161,6 +162,7 @@ TDBASE::TDBASE(PTDBASE tdbp) : TDB(tdbp)
Knum = tdbp->Knum;
Read_Only = tdbp->Read_Only;
m_data_charset= tdbp->m_data_charset;
+ csname = tdbp->csname;
} // end of TDBASE copy constructor
/***********************************************************************/
diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp
index 3ec9a1feaee..54627ba43fd 100644
--- a/storage/connect/tabmysql.cpp
+++ b/storage/connect/tabmysql.cpp
@@ -777,7 +777,7 @@ int TDBMYSQL::Cardinality(PGLOBAL g)
char query[96];
MYSQLC myc;
- if (myc.Open(g, Host, Database, User, Pwd, Port))
+ if (myc.Open(g, Host, Database, User, Pwd, Port, csname))
return -1;
strcpy(query, "SELECT COUNT(*) FROM ");
@@ -871,7 +871,7 @@ bool TDBMYSQL::OpenDB(PGLOBAL g)
/* servers allowing concurency in getting results ??? */
/*********************************************************************/
if (!Myc.Connected()) {
- if (Myc.Open(g, Host, Database, User, Pwd, Port))
+ if (Myc.Open(g, Host, Database, User, Pwd, Port, csname))
return true;
} // endif Connected
diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp
index bbc17129aaf..2b771bcbead 100644
--- a/storage/connect/tabodbc.cpp
+++ b/storage/connect/tabodbc.cpp
@@ -1,11 +1,11 @@
/************* Tabodbc C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: TABODBC */
/* ------------- */
-/* Version 2.8 */
+/* Version 2.9 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2000-2014 */
+/* (C) Copyright to the author Olivier BERTRAND 2000-2015 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -94,8 +94,8 @@ bool ExactInfo(void);
ODBCDEF::ODBCDEF(void)
{
Connect= Tabname= Tabschema= Tabcat= Srcdef= Qchar= Qrystr= Sep= NULL;
- Catver = Options = Quoted = Maxerr = Maxres = 0;
- Scrollable = Xsrc = false;
+ Catver = Options = Cto = Qto = Quoted = Maxerr = Maxres = 0;
+ Scrollable = Memory = Xsrc = false;
} // end of ODBCDEF constructor
/***********************************************************************/
@@ -130,10 +130,10 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
Quoted = GetIntCatInfo("Quoted", 0);
Options = ODBConn::noOdbcDialog;
//Options = ODBConn::noOdbcDialog | ODBConn::useCursorLib;
-
- if ((Scrollable = GetBoolCatInfo("Scrollable", false)))
- Elemt = 0; // Not compatible with extended fetch
-
+ Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT);
+ Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
+ Scrollable = GetBoolCatInfo("Scrollable", false);
+ Memory = GetBoolCatInfo("Memory", false);
Pseudo = 2; // FILID is Ok but not ROWID
return false;
} // end of DefineAM
@@ -195,9 +195,12 @@ TDBODBC::TDBODBC(PODEF tdp) : TDBASE(tdp)
Qrystr = tdp->Qrystr;
Sep = tdp->GetSep();
Options = tdp->Options;
+ Cto = tdp->Cto;
+ Qto = tdp->Qto;
Quoted = MY_MAX(0, tdp->GetQuoted());
Rows = tdp->GetElemt();
Catver = tdp->Catver;
+ Memory = (tdp->Memory) ? 1 : 0;
Scrollable = tdp->Scrollable;
} else {
Connect = NULL;
@@ -208,9 +211,12 @@ TDBODBC::TDBODBC(PODEF tdp) : TDBASE(tdp)
Qrystr = NULL;
Sep = 0;
Options = 0;
+ Cto = DEFAULT_LOGIN_TIMEOUT;
+ Qto = DEFAULT_QUERY_TIMEOUT;
Quoted = 0;
Rows = 0;
Catver = 0;
+ Memory = 0;
Scrollable = false;
} // endif tdp
@@ -220,6 +226,7 @@ TDBODBC::TDBODBC(PODEF tdp) : TDBASE(tdp)
//Where = NULL;
MulConn = NULL;
DBQ = NULL;
+ Qrp = NULL;
Fpos = 0;
AftRows = 0;
CurNum = 0;
@@ -238,6 +245,7 @@ TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBASE(tdbp)
Catalog = tdbp->Catalog;
Srcdef = tdbp->Srcdef;
Qrystr = tdbp->Qrystr;
+ Memory = tdbp->Memory;
Scrollable = tdbp->Scrollable;
Quote = tdbp->Quote;
Query = tdbp->Query;
@@ -246,6 +254,8 @@ TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBASE(tdbp)
MulConn = tdbp->MulConn;
DBQ = tdbp->DBQ;
Options = tdbp->Options;
+ Cto = tdbp->Cto;
+ Qto = tdbp->Qto;
Quoted = tdbp->Quoted;
Rows = tdbp->Rows;
Fpos = tdbp->Fpos;
@@ -256,6 +266,7 @@ TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBASE(tdbp)
Rbuf = tdbp->Rbuf;
BufSize = tdbp->BufSize;
Nparm = tdbp->Nparm;
+ Qrp = tdbp->Qrp;
} // end of TDBODBC copy constructor
// Method
@@ -687,6 +698,9 @@ int TDBODBC::Cardinality(PGLOBAL g)
char qry[96], tbn[64];
ODBConn *ocp = new(g) ODBConn(g, this);
+ ocp->SetLoginTimeout((DWORD)Cto);
+ ocp->SetQueryTimeout((DWORD)Qto);
+
if (ocp->Open(Connect, Options) < 1)
return -1;
@@ -758,18 +772,25 @@ bool TDBODBC::OpenDB(PGLOBAL g)
/*******************************************************************/
/* Table already open, just replace it at its beginning. */
/*******************************************************************/
-// if (To_Kindex)
- /*****************************************************************/
- /* Table is to be accessed through a sorted index table. */
- /*****************************************************************/
-// To_Kindex->Reset();
-
-// rewind(Stream); >>>>>>> Something to be done with Cursor <<<<<<<
- if (Ocp->Rewind(Query, (PODBCCOL)Columns)) {
- Ocp->Close();
- return true;
- } // endif Rewind
+ if (Memory == 1) {
+ if ((Qrp = Ocp->AllocateResult(g)))
+ Memory = 2; // Must be filled
+ else
+ Memory = 0; // Allocation failed, don't use it
+
+ } else if (Memory == 2)
+ Memory = 3; // Ok to use memory result
+
+ if (Memory < 3) {
+ // Method will depend on cursor type
+ if ((Rbuf = Ocp->Rewind(Query, (PODBCCOL)Columns)) < 0) {
+ Ocp->Close();
+ return true;
+ } // endif Rewind
+
+ } // endif Memory
+ CurNum = 0;
Fpos = 0;
return false;
} // endif use
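The hunk above drives the new Memory field through a small state machine: 1 means allocate the in-memory result when the table is re-opened, 2 means fill it while rows are fetched, 3 means serve rows from it without touching the data source again. A minimal standalone sketch of that progression, not CONNECT engine code; fetch_row and the cache vector are invented stand-ins for the ODBC fetch and the Qrp result block.

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for the ODBC data source: yields 3 rows then EOF.
    static bool fetch_row(int& out) {
      static int n = 0;
      if (n >= 3) return false;
      out = ++n;
      return true;
    }

    int main() {
      // Memory states used by the patch: 0 = off, 1 = allocate, 2 = put, 3 = get.
      int memory = 1;
      std::vector<int> cache;            // plays the role of the Qrp result block

      for (int pass = 0; pass < 2; pass++) {
        // OpenDB(): decide how this pass will run.
        if (memory == 1)
          memory = 2;                    // result allocated: fill it while reading
        else if (memory == 2)
          memory = 3;                    // later opens: read back from the cache

        int row;
        if (memory == 3) {
          for (size_t i = 0; i < cache.size(); i++)   // ReadDB() memory branch
            std::printf("pass %d: cached row %d\n", pass, cache[i]);
        } else {
          while (fetch_row(row)) {                    // ReadDB() fetch branch
            if (memory == 2)
              cache.push_back(row);                   // ReadColumn() "put" step
            std::printf("pass %d: fetched row %d\n", pass, row);
          }
        }
      }
      return 0;
    }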
@@ -781,9 +802,11 @@ bool TDBODBC::OpenDB(PGLOBAL g)
/* and if so to allocate just a new result set. But this only for */
/* drivers allowing concurency in getting results ??? */
/*********************************************************************/
- if (!Ocp)
+ if (!Ocp) {
Ocp = new(g) ODBConn(g, this);
- else if (Ocp->IsOpen())
+ Ocp->SetLoginTimeout((DWORD)Cto);
+ Ocp->SetQueryTimeout((DWORD)Qto);
+ } else if (Ocp->IsOpen())
Ocp->Close();
if (Ocp->Open(Connect, Options) < 1)
@@ -870,20 +893,29 @@ int TDBODBC::ReadDB(PGLOBAL g)
if (To_Kindex) {
// Direct access of ODBC tables is not implemented yet
strcpy(g->Message, MSG(NO_ODBC_DIRECT));
- longjmp(g->jumper[g->jump_level], GetAmType());
+ return RC_FX;
} // endif To_Kindex
/*********************************************************************/
/* Now start the reading process. */
/* Here is the place to fetch the line(s). */
/*********************************************************************/
- if (++CurNum >= Rbuf) {
- Rbuf = Ocp->Fetch();
- CurNum = 0;
- } // endif CurNum
+ if (Memory != 3) {
+ if (++CurNum >= Rbuf) {
+ Rbuf = Ocp->Fetch();
+ CurNum = 0;
+ } // endif CurNum
- rc = (Rbuf > 0) ? RC_OK : (Rbuf == 0) ? RC_EF : RC_FX;
- Fpos++; // Used for progress info
+ rc = (Rbuf > 0) ? RC_OK : (Rbuf == 0) ? RC_EF : RC_FX;
+ } else // Getting result from memory
+ rc = (Fpos < Qrp->Nblin) ? RC_OK : RC_EF;
+
+ if (rc == RC_OK) {
+ if (Memory == 2)
+ Qrp->Nblin++;
+
+ Fpos++; // Used for memory
+ } // endif rc
if (trace > 1)
htrc(" Read: Rbuf=%d rc=%d\n", Rbuf, rc);
@@ -966,6 +998,7 @@ ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
} // endif cprec
// Set additional ODBC access method information for column.
+ Crp = NULL;
//Long = cdp->GetLong();
Long = Precision;
//strcpy(F_Date, cdp->F_Date);
@@ -987,6 +1020,7 @@ ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
/***********************************************************************/
ODBCCOL::ODBCCOL(void) : COLBLK()
{
+ Crp = NULL;
Buf_Type = TYPE_INT; // This is a count(*) column
// Set additional Dos access method information for column.
Long = sizeof(int);
@@ -1005,6 +1039,7 @@ ODBCCOL::ODBCCOL(void) : COLBLK()
/***********************************************************************/
ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp)
{
+ Crp = col1->Crp;
Long = col1->Long;
//strcpy(F_Date, col1->F_Date);
To_Val = col1->To_Val;
@@ -1070,7 +1105,20 @@ bool ODBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
void ODBCCOL::ReadColumn(PGLOBAL g)
{
PTDBODBC tdbp = (PTDBODBC)To_Tdb;
- int n = tdbp->CurNum;
+ int i = tdbp->Fpos - 1, n = tdbp->CurNum;
+
+ if (tdbp->Memory == 3) {
+ // Get the value from the stored memory
+ if (Crp->Nulls && Crp->Nulls[i] == '*') {
+ Value->Reset();
+ Value->SetNull(true);
+ } else {
+ Value->SetValue_pvblk(Crp->Kdata, i);
+ Value->SetNull(false);
+ } // endif Nulls
+
+ return;
+ } // endif Memory
if (StrLen[n] == SQL_NULL_DATA) {
// Null value
@@ -1078,7 +1126,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g)
Value->SetNull(true);
Value->Reset();
- return;
+ goto put;
} else
Value->SetNull(false);
@@ -1117,6 +1165,21 @@ void ODBCCOL::ReadColumn(PGLOBAL g)
Name, tdbp->Rows, Bufp, Buf_Type, Value->GetCharString(buf));
} // endif Trace
+ put:
+ if (tdbp->Memory != 2)
+ return;
+
+ /*********************************************************************/
+ /* Fill the allocated result structure. */
+ /*********************************************************************/
+ if (Value->IsNull()) {
+ if (Crp->Nulls)
+ Crp->Nulls[i] = '*'; // Null value
+
+ Crp->Kdata->Reset(i);
+ } else
+ Crp->Kdata->SetValue(Value, i);
+
} // end of ReadColumn
/***********************************************************************/
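In the memory branch of ReadColumn above, a '*' in Crp->Nulls marks a SQL NULL and Crp->Kdata holds the stored values. A self-contained sketch of that put/get pattern; ColumnCache and its methods are invented stand-ins for the PCOLRES block, not the engine's API.

    #include <cstdio>
    #include <string>
    #include <vector>

    // Hypothetical column cache: one value slot and one null flag per row,
    // mirroring Crp->Kdata and Crp->Nulls in the patch ('*' marks SQL NULL).
    struct ColumnCache {
      std::vector<std::string> kdata;
      std::vector<char>        nulls;

      void put(size_t i, const std::string* v) {
        if (kdata.size() <= i) { kdata.resize(i + 1); nulls.resize(i + 1, ' '); }
        if (v == nullptr) { nulls[i] = '*'; kdata[i].clear(); }   // null value
        else              { nulls[i] = ' '; kdata[i] = *v; }
      }

      // Returns false when the cached row holds NULL for this column.
      bool get(size_t i, std::string& out) const {
        if (nulls[i] == '*') return false;
        out = kdata[i];
        return true;
      }
    };

    int main() {
      ColumnCache col;
      std::string a = "alpha";
      col.put(0, &a);
      col.put(1, nullptr);               // row 1 is NULL

      for (size_t i = 0; i < 2; i++) {
        std::string v;
        if (col.get(i, v)) std::printf("row %zu: %s\n", i, v.c_str());
        else               std::printf("row %zu: NULL\n", i);
      }
      return 0;
    }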
@@ -1350,9 +1413,11 @@ bool TDBXDBC::OpenDB(PGLOBAL g)
/* and if so to allocate just a new result set. But this only for */
/* drivers allowing concurency in getting results ??? */
/*********************************************************************/
- if (!Ocp)
+ if (!Ocp) {
Ocp = new(g) ODBConn(g, this);
- else if (Ocp->IsOpen())
+ Ocp->SetLoginTimeout((DWORD)Cto);
+ Ocp->SetQueryTimeout((DWORD)Qto);
+ } else if (Ocp->IsOpen())
Ocp->Close();
if (Ocp->Open(Connect, Options) < 1)
@@ -1489,6 +1554,8 @@ TDBOTB::TDBOTB(PODEF tdp) : TDBDRV(tdp)
Dsn = tdp->GetConnect();
Schema = tdp->GetTabschema();
Tab = tdp->GetTabname();
+ Cto = tdp->Cto;
+ Qto = tdp->Qto;
} // end of TDBOTB constructor
/***********************************************************************/
@@ -1496,7 +1563,7 @@ TDBOTB::TDBOTB(PODEF tdp) : TDBDRV(tdp)
/***********************************************************************/
PQRYRES TDBOTB::GetResult(PGLOBAL g)
{
- return ODBCTables(g, Dsn, Schema, Tab, Maxres, false);
+ return ODBCTables(g, Dsn, Schema, Tab, Maxres, Cto, Qto, false);
} // end of GetResult
/* ---------------------------TDBOCL class --------------------------- */
@@ -1506,7 +1573,7 @@ PQRYRES TDBOTB::GetResult(PGLOBAL g)
/***********************************************************************/
PQRYRES TDBOCL::GetResult(PGLOBAL g)
{
- return ODBCColumns(g, Dsn, Schema, Tab, NULL, Maxres, false);
+ return ODBCColumns(g, Dsn, Schema, Tab, NULL, Maxres, Cto, Qto, false);
} // end of GetResult
/* ------------------------ End of Tabodbc --------------------------- */
diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h
index f042b0c73ca..d8644c8b6de 100644
--- a/storage/connect/tabodbc.h
+++ b/storage/connect/tabodbc.h
@@ -1,7 +1,7 @@
/*************** Tabodbc H Declares Source Code File (.H) **************/
-/* Name: TABODBC.H Version 1.6 */
+/* Name: TABODBC.H Version 1.8 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2000-2013 */
+/* (C) Copyright to the author Olivier BERTRAND 2000-2015 */
/* */
/* This file contains the TDBODBC classes declares. */
/***********************************************************************/
@@ -24,6 +24,7 @@ class DllExport ODBCDEF : public TABDEF { /* Logical table description */
friend class TDBODBC;
friend class TDBXDBC;
friend class TDBDRV;
+ friend class TDBOTB;
public:
// Constructor
ODBCDEF(void);
@@ -56,10 +57,13 @@ class DllExport ODBCDEF : public TABDEF { /* Logical table description */
PSZ Sep; /* Decimal separator */
int Catver; /* ODBC version for catalog functions */
int Options; /* Open connection options */
+ int Cto; /* Open connection timeout */
+ int Qto; /* Query (command) timeout */
int Quoted; /* Identifier quoting level */
int Maxerr; /* Maxerr for an Exec table */
int Maxres; /* Maxres for a catalog table */
bool Scrollable; /* Use scrollable cursor */
+ bool Memory; /* Put result set in memory */
bool Xsrc; /* Execution type */
}; // end of ODBCDEF
@@ -134,6 +138,8 @@ class TDBODBC : public TDBASE {
char *Qrystr; // The original query
char Sep; // The decimal separator
int Options; // Connect options
+ int Cto; // Connect timeout
+ int Qto; // Query timeout
int Quoted; // The identifier quoting level
int Fpos; // Position of last read record
int AftRows; // The number of affected rows
@@ -143,7 +149,9 @@ class TDBODBC : public TDBASE {
int Rbuf; // Number of lines read in buffer
int BufSize; // Size of connect string buffer
int Nparm; // The number of statement parameters
+ int Memory; // 0: No 1: Alloc 2: Put 3: Get
bool Scrollable; // Use scrollable cursor
+ PQRYRES Qrp; // Points to storage result
}; // end of class TDBODBC
/***********************************************************************/
@@ -162,6 +170,7 @@ class ODBCCOL : public COLBLK {
SQLLEN *GetStrLen(void) {return StrLen;}
int GetRank(void) {return Rank;}
// PVBLK GetBlkp(void) {return Blkp;}
+ void SetCrp(PCOLRES crp) {Crp = crp;}
// Methods
virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
@@ -178,6 +187,7 @@ class ODBCCOL : public COLBLK {
// Members
TIMESTAMP_STRUCT *Sqlbuf; // To get SQL_TIMESTAMP's
+ PCOLRES Crp; // To storage result
void *Bufp; // To extended buffer
PVBLK Blkp; // To Value Block
//char F_Date[12]; // Internal Date format
@@ -306,6 +316,8 @@ class TDBOTB : public TDBDRV {
char *Dsn; // Points to connection string
char *Schema; // Points to schema name or NULL
char *Tab; // Points to ODBC table name or pattern
+ int Cto; // Connect timeout
+ int Qto; // Query timeout
}; // end of class TDBOTB
/***********************************************************************/
diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp
index 81d00862703..1cc40473433 100644
--- a/storage/connect/value.cpp
+++ b/storage/connect/value.cpp
@@ -330,7 +330,7 @@ int ConvertType(int target, int type, CONV kind, bool match)
/***********************************************************************/
/* AllocateConstant: allocates a constant Value. */
/***********************************************************************/
-PVAL AllocateValue(PGLOBAL g, void *value, short type)
+PVAL AllocateValue(PGLOBAL g, void *value, short type, short prec)
{
PVAL valp;
@@ -351,7 +351,7 @@ PVAL AllocateValue(PGLOBAL g, void *value, short type)
valp = new(g) TYPVAL<longlong>(*(longlong*)value, TYPE_BIGINT);
break;
case TYPE_DOUBLE:
- valp = new(g) TYPVAL<double>(*(double *)value, TYPE_DOUBLE, 2);
+ valp = new(g) TYPVAL<double>(*(double *)value, TYPE_DOUBLE, prec);
break;
case TYPE_TINY:
valp = new(g) TYPVAL<char>(*(char *)value, TYPE_TINY);
@@ -434,6 +434,7 @@ PVAL AllocateValue(PGLOBAL g, PVAL valp, int newtype, int uns)
{
PSZ p, sp;
bool un = (uns < 0) ? false : (uns > 0) ? true : valp->IsUnsigned();
+ PVAL vp;
if (newtype == TYPE_VOID) // Means allocate a value of the same type
newtype = valp->GetType();
@@ -445,53 +446,55 @@ PVAL AllocateValue(PGLOBAL g, PVAL valp, int newtype, int uns)
if ((sp = valp->GetCharString(p)) != p)
strcpy (p, sp);
- valp = new(g) TYPVAL<PSZ>(g, p, valp->GetValLen(), valp->GetValPrec());
+ vp = new(g) TYPVAL<PSZ>(g, p, valp->GetValLen(), valp->GetValPrec());
break;
case TYPE_SHORT:
if (un)
- valp = new(g) TYPVAL<ushort>(valp->GetUShortValue(),
- TYPE_SHORT, 0, true);
+ vp = new(g) TYPVAL<ushort>(valp->GetUShortValue(),
+ TYPE_SHORT, 0, true);
else
- valp = new(g) TYPVAL<short>(valp->GetShortValue(), TYPE_SHORT);
+ vp = new(g) TYPVAL<short>(valp->GetShortValue(), TYPE_SHORT);
break;
case TYPE_INT:
if (un)
- valp = new(g) TYPVAL<uint>(valp->GetUIntValue(), TYPE_INT, 0, true);
+ vp = new(g) TYPVAL<uint>(valp->GetUIntValue(), TYPE_INT, 0, true);
else
- valp = new(g) TYPVAL<int>(valp->GetIntValue(), TYPE_INT);
+ vp = new(g) TYPVAL<int>(valp->GetIntValue(), TYPE_INT);
break;
case TYPE_BIGINT:
if (un)
- valp = new(g) TYPVAL<ulonglong>(valp->GetUBigintValue(),
- TYPE_BIGINT, 0, true);
+ vp = new(g) TYPVAL<ulonglong>(valp->GetUBigintValue(),
+ TYPE_BIGINT, 0, true);
else
- valp = new(g) TYPVAL<longlong>(valp->GetBigintValue(), TYPE_BIGINT);
+ vp = new(g) TYPVAL<longlong>(valp->GetBigintValue(), TYPE_BIGINT);
break;
case TYPE_DATE:
- valp = new(g) DTVAL(g, valp->GetIntValue());
+ vp = new(g) DTVAL(g, valp->GetIntValue());
break;
case TYPE_DOUBLE:
- valp = new(g) TYPVAL<double>(valp->GetFloatValue(), TYPE_DOUBLE,
- valp->GetValPrec());
+ vp = new(g) TYPVAL<double>(valp->GetFloatValue(), TYPE_DOUBLE,
+ (uns) ? uns : valp->GetValPrec());
break;
case TYPE_TINY:
if (un)
- valp = new(g) TYPVAL<uchar>(valp->GetUTinyValue(),
+ vp = new(g) TYPVAL<uchar>(valp->GetUTinyValue(),
TYPE_TINY, 0, true);
else
- valp = new(g) TYPVAL<char>(valp->GetTinyValue(), TYPE_TINY);
+ vp = new(g) TYPVAL<char>(valp->GetTinyValue(), TYPE_TINY);
break;
default:
sprintf(g->Message, MSG(BAD_VALUE_TYPE), newtype);
return NULL;
} // endswitch type
-
- valp->SetGlobal(g);
- return valp;
+
+ vp->SetNullable(valp->GetNullable());
+ vp->SetNull(valp->IsNull());
+ vp->SetGlobal(g);
+ return vp;
} // end of AllocateValue
/* -------------------------- Class VALUE ---------------------------- */
@@ -542,6 +545,15 @@ BYTE VALUE::TestValue(PVAL vp)
return (n > 0) ? 0x04 : (n < 0) ? 0x02 : 0x01;
} // end of TestValue
+/***********************************************************************/
+/* Compute: default implementation, not supported for this value type. */
+/***********************************************************************/
+bool VALUE::Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op)
+ {
+ strcpy(g->Message, "Compute not implemented for this value type");
+ return true;
+ } // end of Compute
+
/* -------------------------- Class TYPVAL ---------------------------- */
/***********************************************************************/
@@ -931,6 +943,202 @@ int TYPVAL<TYPE>::CompareValue(PVAL vp)
} // end of CompareValue
/***********************************************************************/
+/* Return max type value if b is true, else min type value. */
+/***********************************************************************/
+template <>
+short TYPVAL<short>::MinMaxVal(bool b)
+ {return (b) ? INT_MAX16 : INT_MIN16;}
+
+template <>
+ushort TYPVAL<ushort>::MinMaxVal(bool b)
+ {return (b) ? UINT_MAX16 : 0;}
+
+template <>
+int TYPVAL<int>::MinMaxVal(bool b)
+ {return (b) ? INT_MAX32 : INT_MIN32;}
+
+template <>
+uint TYPVAL<uint>::MinMaxVal(bool b)
+ {return (b) ? UINT_MAX32 : 0;}
+
+template <>
+longlong TYPVAL<longlong>::MinMaxVal(bool b)
+ {return (b) ? INT_MAX64 : INT_MIN64;}
+
+template <>
+ulonglong TYPVAL<ulonglong>::MinMaxVal(bool b)
+ {return (b) ? 0xFFFFFFFFFFFFFFFFLL : 0;}
+
+template <>
+double TYPVAL<double>::MinMaxVal(bool b)
+ {assert(false); return 0.0;}
+
+template <>
+char TYPVAL<char>::MinMaxVal(bool b)
+ {return (b) ? INT_MAX8 : INT_MIN8;}
+
+template <>
+uchar TYPVAL<uchar>::MinMaxVal(bool b)
+ {return (b) ? UINT_MAX8 : 0;}
+
+/***********************************************************************/
+/* SafeAdd: adds a value and tests whether overflow/underflow occurred. */
+/***********************************************************************/
+template <class TYPE>
+TYPE TYPVAL<TYPE>::SafeAdd(TYPE n1, TYPE n2)
+ {
+ PGLOBAL& g = Global;
+ TYPE n = n1 + n2;
+
+ if ((n2 > 0) && (n < n1)) {
+ // Overflow
+ strcpy(g->Message, MSG(FIX_OVFLW_ADD));
+ longjmp(g->jumper[g->jump_level], 138);
+ } else if ((n2 < 0) && (n > n1)) {
+ // Underflow
+ strcpy(g->Message, MSG(FIX_UNFLW_ADD));
+ longjmp(g->jumper[g->jump_level], 138);
+ } // endif's n2
+
+ return n;
+ } // end of SafeAdd
+
+template <>
+inline double TYPVAL<double>::SafeAdd(double n1, double n2)
+ {
+ return n1 + n2;
+ } // end of SafeAdd
+
+/***********************************************************************/
+/* SafeMult: multiplies values and tests whether overflow occurred. */
+/***********************************************************************/
+template <class TYPE>
+TYPE TYPVAL<TYPE>::SafeMult(TYPE n1, TYPE n2)
+ {
+ PGLOBAL& g = Global;
+ double n = (double)n1 * (double)n2;
+
+ if (n > MinMaxVal(true)) {
+ // Overflow
+ strcpy(g->Message, MSG(FIX_OVFLW_TIMES));
+ longjmp(g->jumper[g->jump_level], 138);
+ } else if (n < MinMaxVal(false)) {
+ // Underflow
+ strcpy(g->Message, MSG(FIX_UNFLW_TIMES));
+ longjmp(g->jumper[g->jump_level], 138);
+ } // endif's n2
+
+ return (TYPE)n;
+ } // end of SafeMult
+
+template <>
+inline double TYPVAL<double>::SafeMult(double n1, double n2)
+ {
+ return n1 * n2;
+ } // end of SafeMult
+
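The SafeAdd/SafeMult checks above can be tried in isolation: an addition overflow is detected from wrap-around, and a multiplication overflow by comparing the double-precision product against the type limits. A standalone sketch follows; it returns a flag instead of taking the longjmp error path used in the patch, and uses an unsigned add so the wrap-around test stays well defined.

    #include <climits>
    #include <cstdio>

    // Wrap-around check in the style of SafeAdd (well defined for unsigned types).
    static bool safe_add_u32(unsigned n1, unsigned n2, unsigned& out) {
      unsigned n = n1 + n2;
      if (n2 > 0 && n < n1) return false;          // overflow
      out = n;
      return true;
    }

    // SafeMult-style check: do the multiply in double and compare against the
    // integer range before narrowing back.
    static bool safe_mult_i32(int n1, int n2, int& out) {
      double n = (double)n1 * (double)n2;
      if (n > INT_MAX) return false;               // overflow
      if (n < INT_MIN) return false;               // underflow
      out = (int)n;
      return true;
    }

    int main() {
      unsigned u;
      int v;
      std::printf("add ok=%d\n",  safe_add_u32(4000000000u, 500000000u, u));
      std::printf("mult ok=%d\n", safe_mult_i32(100000, 100000, v));
      return 0;
    }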
+/***********************************************************************/
+/* Compute defined functions for the type. */
+/***********************************************************************/
+template <class TYPE>
+bool TYPVAL<TYPE>::Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op)
+ {
+ bool rc = false;
+ TYPE val[2];
+
+ assert(np == 2);
+
+ for (int i = 0; i < np; i++)
+ val[i] = GetTypedValue(vp[i]);
+
+ switch (op) {
+ case OP_ADD:
+ Tval = SafeAdd(val[0], val[1]);
+ break;
+ case OP_MULT:
+ Tval = SafeMult(val[0], val[1]);
+ break;
+ case OP_DIV:
+ if (!val[1]) {
+ strcpy(g->Message, MSG(ZERO_DIVIDE));
+ return true;
+ } // endif
+
+ Tval = val[0] / val[1];
+ break;
+ default:
+ rc = Compall(g, vp, np, op);
+ break;
+ } // endswitch op
+
+ return rc;
+ } // end of Compute
+
+template <>
+bool TYPVAL<double>::Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op)
+ {
+ bool rc = false;
+ double val[2];
+
+ assert(np == 2);
+
+ for (int i = 0; i < np; i++)
+ val[i] = vp[i]->GetFloatValue();
+
+ switch (op) {
+ case OP_ADD:
+ Tval = val[0] + val[1];
+ break;
+ case OP_MULT:
+ Tval = val[0] * val[1];
+ break;
+ default:
+ rc = Compall(g, vp, np, op);
+ } // endswitch op
+
+ return rc;
+ } // end of Compute
+
+/***********************************************************************/
+/* Compute a function for all types. */
+/***********************************************************************/
+template <class TYPE>
+bool TYPVAL<TYPE>::Compall(PGLOBAL g, PVAL *vp, int np, OPVAL op)
+ {
+ TYPE val[2];
+
+ for (int i = 0; i < np; i++)
+ val[i] = GetTypedValue(vp[i]);
+
+ switch (op) {
+ case OP_DIV:
+ if (val[0]) {
+ if (!val[1]) {
+ strcpy(g->Message, MSG(ZERO_DIVIDE));
+ return true;
+ } // endif
+
+ Tval = val[0] / val[1];
+ } else
+ Tval = 0;
+
+ break;
+ case OP_MIN:
+ Tval = MY_MIN(val[0], val[1]);
+ break;
+ case OP_MAX:
+ Tval = MY_MAX(val[0], val[1]);
+ break;
+ default:
+// sprintf(g->Message, MSG(BAD_EXP_OPER), op);
+ strcpy(g->Message, "Function not supported");
+ return true;
+ } // endswitch op
+
+ return false;
+ } // end of Compall
+
+/***********************************************************************/
/* FormatValue: This function set vp (a STRING value) to the string */
/* constructed from its own value formated using the fmt format. */
/* This function assumes that the format matches the value type. */
@@ -1410,6 +1618,45 @@ int TYPVAL<PSZ>::CompareValue(PVAL vp)
} // end of CompareValue
/***********************************************************************/
+/* Compute a function on a string. */
+/***********************************************************************/
+bool TYPVAL<PSZ>::Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op)
+ {
+ char *p[2], val[2][32];
+ int i;
+
+ for (i = 0; i < np; i++)
+ p[i] = vp[i]->GetCharString(val[i]);
+
+ switch (op) {
+ case OP_CNC:
+ assert(np == 1 || np == 2);
+
+ if (np == 2)
+ strncpy(Strp, p[0], Len);
+
+ if ((i = Len - (signed)strlen(Strp)) > 0)
+ strncat(Strp, p[np - 1], i);
+
+ break;
+ case OP_MIN:
+ assert(np == 2);
+ strcpy(Strp, (strcmp(p[0], p[1]) < 0) ? p[0] : p[1]);
+ break;
+ case OP_MAX:
+ assert(np == 2);
+ strcpy(Strp, (strcmp(p[0], p[1]) > 0) ? p[0] : p[1]);
+ break;
+ default:
+// sprintf(g->Message, MSG(BAD_EXP_OPER), op);
+ strcpy(g->Message, "Function not supported");
+ return true;
+ } // endswitch op
+
+ return false;
+ } // end of Compute
+
+/***********************************************************************/
/* FormatValue: This function set vp (a STRING value) to the string */
/* constructed from its own value formated using the fmt format. */
/* This function assumes that the format matches the value type. */
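The OP_CNC case above copies the first operand with strncpy bounded by Len and appends the second with strncat bounded by the remaining room. A standalone sketch of the same bounded concatenation, assuming (as TYPVAL<PSZ> does) that the buffer reserves one extra byte for the terminator; the names below are illustrative only.

    #include <cstdio>
    #include <cstring>

    // Bounded concatenation in the style of OP_CNC: the destination holds at
    // most 'len' characters plus a terminator kept in dest[len].
    static void cnc(char* dest, size_t len, const char* a, const char* b) {
      std::strncpy(dest, a, len);                // may fill all len characters
      dest[len] = '\0';                          // keep the guard terminator
      size_t room = len - std::strlen(dest);     // space left for the 2nd operand
      if (room > 0)
        std::strncat(dest, b, room);
    }

    int main() {
      char buf[8 + 1];                           // len = 8 plus the guard byte
      cnc(buf, 8, "abcd", "efghij");
      std::printf("%s\n", buf);                  // prints "abcdefgh"
      return 0;
    }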
@@ -2187,10 +2434,11 @@ static void TIME_to_localtime(struct tm *tm, const MYSQL_TIME *ltime)
tm->tm_year= ltime->year - 1900;
tm->tm_mon= ltime->month - 1;
tm->tm_mday= ltime->day;
+ mktime(tm); // set tm->tm_wday and tm->tm_yday to get the proper day name (OB)
tm->tm_hour= ltime->hour;
tm->tm_min= ltime->minute;
tm->tm_sec= ltime->second;
-}
+} // end of TIME_to_localtime
// Added by Alexander Barkov
static struct tm *gmtime_mysql(const time_t *timep, struct tm *tm)
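The mktime() call added above runs before the time-of-day fields are copied, so only the date part determines tm_wday/tm_yday and day-name formats come out right. A standalone illustration using only the standard C library (the date below is an arbitrary example):

    #include <cstdio>
    #include <cstring>
    #include <ctime>

    int main() {
      std::tm tm;
      std::memset(&tm, 0, sizeof tm);
      tm.tm_year  = 2015 - 1900;         // fields copied from the broken-down value
      tm.tm_mon   = 0;                   // January
      tm.tm_mday  = 15;
      tm.tm_isdst = -1;                  // let the library decide DST

      std::mktime(&tm);                  // fills tm_wday and tm_yday

      char buf[64];
      std::strftime(buf, sizeof buf, "%A %j", &tm);   // day name needs tm_wday
      std::printf("%s\n", buf);          // e.g. "Thursday 015"
      return 0;
    }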
@@ -2199,7 +2447,7 @@ static struct tm *gmtime_mysql(const time_t *timep, struct tm *tm)
thd_gmt_sec_to_TIME(current_thd, &ltime, (my_time_t) *timep);
TIME_to_localtime(tm, &ltime);
return tm;
-}
+} // end of gmtime_mysql
/***********************************************************************/
/* GetGmTime: returns a pointer to a static tm structure obtained */
@@ -2511,6 +2759,8 @@ char *DTVAL::ShowValue(char *buf, int len)
if (!Null) {
size_t m, n = 0;
struct tm tm, *ptm = GetGmTime(&tm);
+
+
if (Len < len) {
p = buf;
@@ -2596,7 +2846,7 @@ bool DTVAL::WeekNum(PGLOBAL g, int& nval)
/***********************************************************************/
bool DTVAL::FormatValue(PVAL vp, char *fmt)
{
- char *buf = (char*)vp->GetTo_Val(); // Should be big enough
+ char *buf = (char*)vp->GetTo_Val(); // Should be big enough
struct tm tm, *ptm = GetGmTime(&tm);
if (trace > 1)
diff --git a/storage/connect/value.h b/storage/connect/value.h
index 3ce7027aeeb..c5aeb5c2a2f 100644
--- a/storage/connect/value.h
+++ b/storage/connect/value.h
@@ -29,12 +29,9 @@ enum CONV {CNV_ANY = 0, /* Convert to any type */
class CONSTANT; // For friend setting
typedef struct _datpar *PDTP; // For DTVAL
-
/***********************************************************************/
/* Utilities used to test types and to allocated values. */
/***********************************************************************/
-PVAL AllocateValue(PGLOBAL, void *, short);
-
// Exported functions
DllExport PSZ GetTypeName(int);
DllExport int GetTypeSize(int, int);
@@ -47,6 +44,7 @@ DllExport int GetFormatType(char);
DllExport bool IsTypeChar(int type);
DllExport bool IsTypeNum(int type);
DllExport int ConvertType(int, int, CONV, bool match = false);
+DllExport PVAL AllocateValue(PGLOBAL, void *, short, short = 2);
DllExport PVAL AllocateValue(PGLOBAL, PVAL, int = TYPE_VOID, int = 0);
DllExport PVAL AllocateValue(PGLOBAL, int, int len = 0, int prec = 0,
bool uns = false, PSZ fmt = NULL);
@@ -114,6 +112,7 @@ class DllExport VALUE : public BLOCK {
virtual char *ShowValue(char *buf, int len = 0) = 0;
virtual char *GetCharString(char *p) = 0;
virtual bool IsEqual(PVAL vp, bool chktype) = 0;
+ virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op);
virtual bool FormatValue(PVAL vp, char *fmt) = 0;
protected:
@@ -149,9 +148,9 @@ class DllExport TYPVAL : public VALUE {
virtual bool IsZero(void) {return Tval == 0;}
virtual void Reset(void) {Tval = 0;}
virtual int GetValLen(void);
- virtual int GetValPrec() {return 0;}
+ virtual int GetValPrec() {return Prec;}
virtual int GetSize(void) {return sizeof(TYPE);}
- virtual PSZ GetCharValue(void) {return VALUE::GetCharValue();}
+//virtual PSZ GetCharValue(void) {return VALUE::GetCharValue();}
virtual char GetTinyValue(void) {return (char)Tval;}
virtual uchar GetUTinyValue(void) {return (uchar)Tval;}
virtual short GetShortValue(void) {return (short)Tval;}
@@ -184,12 +183,18 @@ class DllExport TYPVAL : public VALUE {
virtual char *ShowValue(char *buf, int);
virtual char *GetCharString(char *p);
virtual bool IsEqual(PVAL vp, bool chktype);
+ virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op);
virtual bool SetConstFormat(PGLOBAL, FORMAT&);
virtual bool FormatValue(PVAL vp, char *fmt);
virtual void Print(PGLOBAL g, FILE *, uint);
virtual void Print(PGLOBAL g, char *, uint);
protected:
+ static TYPE MinMaxVal(bool b);
+ TYPE SafeAdd(TYPE n1, TYPE n2);
+ TYPE SafeMult(TYPE n1, TYPE n2);
+ bool Compall(PGLOBAL g, PVAL *vp, int np, OPVAL op);
+
// Default constructor not to be used
TYPVAL(void) : VALUE(TYPE_ERROR) {}
@@ -253,6 +258,7 @@ class DllExport TYPVAL<PSZ>: public VALUE {
virtual char *ShowValue(char *buf, int);
virtual char *GetCharString(char *p);
virtual bool IsEqual(PVAL vp, bool chktype);
+ virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op);
virtual bool FormatValue(PVAL vp, char *fmt);
virtual bool SetConstFormat(PGLOBAL, FORMAT&);
diff --git a/storage/connect/xtable.h b/storage/connect/xtable.h
index 49fbbb0de26..501a5e87cfa 100644
--- a/storage/connect/xtable.h
+++ b/storage/connect/xtable.h
@@ -209,6 +209,7 @@ class DllExport TDBASE : public TDB {
int Knum; // Size of key arrays
bool Read_Only; // True for read only tables
const CHARSET_INFO *m_data_charset;
+ const char *csname; // Table charset name
}; // end of class TDBASE
/***********************************************************************/
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 195b37d6814..2defcb89eb0 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -2365,6 +2365,38 @@ btr_cur_pess_upd_restore_supremum(
}
/*************************************************************//**
+Check if the total length of the modified blob for the row is within 10%
+of the total redo log size. This constraint on the blob length is to
+avoid overwriting the redo logs beyond the last checkpoint lsn.
+@return DB_SUCCESS or DB_TOO_BIG_RECORD. */
+static
+dberr_t
+btr_check_blob_limit(const big_rec_t* big_rec_vec)
+{
+ const ib_uint64_t redo_size = srv_n_log_files * srv_log_file_size
+ * UNIV_PAGE_SIZE;
+ const ulint redo_10p = redo_size / 10;
+ ulint total_blob_len = 0;
+ dberr_t err = DB_SUCCESS;
+
+ /* Calculate the total number of bytes for blob data */
+ for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
+ total_blob_len += big_rec_vec->fields[i].len;
+ }
+
+ if (total_blob_len > redo_10p) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data"
+ " length (" ULINTPF ") is greater than"
+ " 10%% of the total redo log size (" UINT64PF
+ "). Please increase total redo log size.",
+ total_blob_len, redo_size);
+ err = DB_TOO_BIG_RECORD;
+ }
+
+ return(err);
+}
+
+/*************************************************************//**
Performs an update of a record on a page of a tree. It is assumed
that mtr holds an x-latch on the tree and on the cursor page. If the
update is made on the leaf level, to avoid deadlocks, mtr must also
@@ -2579,26 +2611,14 @@ make_external:
}
if (big_rec_vec) {
- const ulint redo_10p = srv_log_file_size * UNIV_PAGE_SIZE / 10;
- ulint total_blob_len = 0;
- /* Calculate the total number of bytes for blob data */
- for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
- total_blob_len += big_rec_vec->fields[i].len;
- }
+ err = btr_check_blob_limit(big_rec_vec);
- if (total_blob_len > redo_10p) {
- ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data"
- " length (" ULINTPF ") is greater than"
- " 10%% of the redo log file size (" UINT64PF
- "). Please increase innodb_log_file_size.",
- total_blob_len, srv_log_file_size);
+ if (err != DB_SUCCESS) {
if (n_reserved > 0) {
fil_space_release_free_extents(
index->space, n_reserved);
}
-
- err = DB_TOO_BIG_RECORD;
goto err_exit;
}
}
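btr_check_blob_limit() compares the summed external field lengths against 10% of the total redo capacity, srv_n_log_files * srv_log_file_size * UNIV_PAGE_SIZE, instead of a single log file as before. A worked example of that arithmetic with assumed values (two 48 MiB redo logs, 16 KiB pages, and made-up column lengths; srv_log_file_size is taken to be expressed in pages, as the multiplication in the patch suggests):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed configuration, not taken from the patch.
      const uint64_t n_log_files   = 2;
      const uint64_t page_size     = 16 * 1024;
      const uint64_t log_file_size = 48ULL * 1024 * 1024 / page_size;  // in pages

      const uint64_t redo_size = n_log_files * log_file_size * page_size;
      const uint64_t redo_10p  = redo_size / 10;

      uint64_t blob_lens[] = { 4ULL << 20, 6ULL << 20 };   // 4 MiB + 6 MiB columns
      uint64_t total = 0;
      for (uint64_t len : blob_lens)
        total += len;

      std::printf("total blob bytes: %llu, limit: %llu, too big: %s\n",
                  (unsigned long long)total, (unsigned long long)redo_10p,
                  total > redo_10p ? "yes" : "no");
      return 0;
    }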
@@ -3844,7 +3864,7 @@ btr_estimate_number_of_different_key_vals(
ib_uint64_t* n_diff;
ib_uint64_t* n_not_null;
ibool stats_null_not_equal;
- ullint n_sample_pages; /* number of pages to sample */
+ ullint n_sample_pages = 1; /* number of pages to sample */
ulint not_empty_flag = 0;
ulint total_external_size = 0;
ulint i;
@@ -3897,25 +3917,57 @@ btr_estimate_number_of_different_key_vals(
if (srv_stats_transient_sample_pages > index->stat_index_size) {
if (index->stat_index_size > 0) {
n_sample_pages = index->stat_index_size;
- } else {
- n_sample_pages = 1;
}
} else {
n_sample_pages = srv_stats_transient_sample_pages;
}
} else {
- /* New logaritmic number of pages that are estimated. We
- first pick minimun from srv_stats_transient_sample_pages and number of
- pages on index. Then we pick maximum from previous number of
- pages and log2(number of index pages) * srv_stats_transient_sample_pages. */
- if (index->stat_index_size > 0) {
- n_sample_pages = ut_max(ut_min(srv_stats_transient_sample_pages, index->stat_index_size),
- log2(index->stat_index_size)*srv_stats_transient_sample_pages);
- } else {
- n_sample_pages = 1;
+ /* New logarithmic number of pages to estimate.
+ The number of pages estimated should be between 1 and
+ index->stat_index_size.
+
+ If we have only 0 or 1 index pages then we can only take 1
+ sample. We have already initialized n_sample_pages to 1.
+
+ So taking the index size as I, the sample as S, and log2(I)*S as L:
+
+ requirement 1) the upper limit of the expression must not exceed I;
+ requirement 2) the number of ideal pages should be at least S;
+ so the expression is min(I, max(min(S, I), L))
+
+ looking for simplifications:
+
+ case 1: assume S < I
+ min(I, max(min(S, I), L)) -> min(I, max(S, L))
+
+ but since L = log2(I)*S and log2(I) >= 1, L >= S, so max(S, L) = L.
+
+ so we have: min(I, L)
+
+ case 2: assume I <= S
+ min(I, max(min(S, I), L)) -> min(I, max(I, L))
+
+ case 2a: L > I
+ min(I, max(I, L)) -> min(I, L) -> I
+
+ case 2b: L <= I
+ min(I, max(I, L)) -> min(I, I) -> I
+
+ so all case 2 paths yield I, and our expression is:
+ n_pages = S < I ? min(I, L) : I
+ */
+ if (index->stat_index_size > 1) {
+ n_sample_pages = (srv_stats_transient_sample_pages < index->stat_index_size) ?
+ ut_min(index->stat_index_size,
+ log2(index->stat_index_size)*srv_stats_transient_sample_pages)
+ : index->stat_index_size;
+
}
}
+ /* Sanity check */
+ ut_ad(n_sample_pages > 0 && n_sample_pages <= (index->stat_index_size <= 1 ? 1 : index->stat_index_size));
+
/* We sample some pages in the index to get an estimate */
for (i = 0; i < n_sample_pages; i++) {
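A worked instance of the final expression from the comment above, n_pages = S < I ? min(I, log2(I)*S) : I, with assumed values for the configured sample count S and the index size I:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // n_sample_pages as derived in the comment: S = configured sample pages,
    // I = index size in pages (the values below are assumptions).
    static double sample_pages(double S, double I) {
      if (I <= 1) return 1;                       // 0 or 1 pages: single sample
      return S < I ? std::min(I, std::log2(I) * S) : I;
    }

    int main() {
      std::printf("%.0f\n", sample_pages(8, 4));      // I <= S -> 4
      std::printf("%.0f\n", sample_pages(8, 1000));   // min(1000, ~9.97*8) -> ~80
      std::printf("%.0f\n", sample_pages(8, 16));     // min(16, 4*8) -> 16
      return 0;
    }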
@@ -4438,7 +4490,6 @@ btr_store_big_rec_extern_fields(
buf_block_t** freed_pages = NULL;
ulint n_freed_pages = 0;
dberr_t error = DB_SUCCESS;
- ulint total_blob_len = 0;
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_any_extern(offsets));
@@ -4458,21 +4509,11 @@ btr_store_big_rec_extern_fields(
rec_page_no = buf_block_get_page_no(rec_block);
ut_a(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX);
- const ulint redo_10p = (srv_log_file_size * UNIV_PAGE_SIZE / 10);
-
- /* Calculate the total number of bytes for blob data */
- for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
- total_blob_len += big_rec_vec->fields[i].len;
- }
+ error = btr_check_blob_limit(big_rec_vec);
- if (total_blob_len > redo_10p) {
+ if (error != DB_SUCCESS) {
ut_ad(op == BTR_STORE_INSERT);
- ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data length"
- " (" ULINTPF ") is greater than 10%% of the"
- " redo log file size (" UINT64PF "). Please"
- " increase innodb_log_file_size.",
- total_blob_len, srv_log_file_size);
- return(DB_TOO_BIG_RECORD);
+ return(error);
}
if (page_zip) {
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 9fceae0f880..f0b735546f3 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -564,9 +564,14 @@ buf_page_is_corrupted(
checksum_field2 = mach_read_from_4(
read_buf + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM);
+#if FIL_PAGE_LSN % 8
+#error "FIL_PAGE_LSN must be 64 bit aligned"
+#endif
+
/* declare empty pages non-corrupted */
if (checksum_field1 == 0 && checksum_field2 == 0
- && mach_read_from_4(read_buf + FIL_PAGE_LSN) == 0) {
+ && *reinterpret_cast<const ib_uint64_t*>(read_buf +
+ FIL_PAGE_LSN) == 0) {
/* make sure that the page is really empty */
for (ulint i = 0; i < UNIV_PAGE_SIZE; i++) {
if (read_buf[i] != 0) {
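The #error guard above makes the 8-byte read of FIL_PAGE_LSN legal only because the field offset is 64-bit aligned, and the comparison against zero is endianness-neutral, which is the only way the raw load is used. A standalone sketch of the same pattern with a hypothetical offset MY_PAGE_LSN:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical field offset; the patch checks FIL_PAGE_LSN the same way.
    #define MY_PAGE_LSN 16

    #if MY_PAGE_LSN % 8
    #error "MY_PAGE_LSN must be 64 bit aligned"
    #endif

    int main() {
      alignas(8) unsigned char page[64] = {0};   // page buffers are aligned
      const uint64_t lsn =
          *reinterpret_cast<const uint64_t*>(page + MY_PAGE_LSN);
      std::printf("lsn is zero: %s\n", lsn == 0 ? "yes" : "no");
      return 0;
    }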
diff --git a/storage/innobase/buf/buf0checksum.cc b/storage/innobase/buf/buf0checksum.cc
index ec79bbe6be9..4ba65d6f2d0 100644
--- a/storage/innobase/buf/buf0checksum.cc
+++ b/storage/innobase/buf/buf0checksum.cc
@@ -27,20 +27,21 @@ Created Aug 11, 2011 Vasil Dimov
#include "fil0fil.h" /* FIL_* */
#include "ut0crc32.h" /* ut_crc32() */
#include "ut0rnd.h" /* ut_fold_binary() */
+#include "buf0checksum.h"
#ifndef UNIV_INNOCHECKSUM
#include "srv0srv.h" /* SRV_CHECKSUM_* */
#include "buf0types.h"
+#endif /* !UNIV_INNOCHECKSUM */
+
/** the macro MYSQL_SYSVAR_ENUM() requires "long unsigned int" and if we
use srv_checksum_algorithm_t here then we get a compiler error:
ha_innodb.cc:12251: error: cannot convert 'srv_checksum_algorithm_t*' to
'long unsigned int*' in initialization */
UNIV_INTERN ulong srv_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_INNODB;
-#endif /* !UNIV_INNOCHECKSUM */
-
/********************************************************************//**
Calculates a page CRC32 which is stored to the page when it is written
to a file. Note that we must be careful to calculate the same value on
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 6b219262207..f5145297b3f 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -837,39 +837,35 @@ buf_flush_init_for_writing(
case SRV_CHECKSUM_ALGORITHM_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
checksum = buf_calc_page_crc32(page);
+ mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
break;
case SRV_CHECKSUM_ALGORITHM_INNODB:
case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
checksum = (ib_uint32_t) buf_calc_page_new_checksum(page);
+ mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
+ checksum = (ib_uint32_t) buf_calc_page_old_checksum(page);
break;
case SRV_CHECKSUM_ALGORITHM_NONE:
case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
checksum = BUF_NO_CHECKSUM_MAGIC;
+ mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
break;
/* no default so the compiler will emit a warning if new enum
is added and not handled here */
}
- mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
-
- /* We overwrite the first 4 bytes of the end lsn field to store
- the old formula checksum. Since it depends also on the field
- FIL_PAGE_SPACE_OR_CHKSUM, it has to be calculated after storing the
- new formula checksum. */
-
- if (srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB
- || srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_INNODB) {
+ /* With the InnoDB checksum, we overwrite the first 4 bytes of
+ the end lsn field to store the old formula checksum. Since it
+ depends also on the field FIL_PAGE_SPACE_OR_CHKSUM, it has to
+ be calculated after storing the new formula checksum.
- checksum = (ib_uint32_t) buf_calc_page_old_checksum(page);
-
- /* In other cases we use the value assigned from above.
- If CRC32 is used then it is faster to use that checksum
- (calculated above) instead of calculating another one.
- We can afford to store something other than
- buf_calc_page_old_checksum() or BUF_NO_CHECKSUM_MAGIC in
- this field because the file will not be readable by old
- versions of MySQL/InnoDB anyway (older than MySQL 5.6.3) */
- }
+ In other cases we write the same value to both fields.
+ If CRC32 is used then it is faster to use that checksum
+ (calculated above) instead of calculating another one.
+ We can afford to store something other than
+ buf_calc_page_old_checksum() or BUF_NO_CHECKSUM_MAGIC in
+ this field because the file will not be readable by old
+ versions of MySQL/InnoDB anyway (older than MySQL 5.6.3) */
mach_write_to_4(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM,
checksum);
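After the restructuring above, each checksum algorithm writes its value into the header slot inside its own case, and only the InnoDB algorithms recompute the old-formula value for the trailer; the other algorithms store the same value in both places. A standalone sketch with invented stand-ins for the checksum functions, mach_write_to_4() and BUF_NO_CHECKSUM_MAGIC:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    enum Algo { ALGO_CRC32, ALGO_INNODB, ALGO_NONE };

    // Stand-ins for buf_calc_page_crc32()/..._new_checksum()/..._old_checksum().
    static uint32_t calc_crc32(const unsigned char*) { return 0x11111111; }
    static uint32_t calc_new(const unsigned char*)   { return 0x22222222; }
    static uint32_t calc_old(const unsigned char*)   { return 0x33333333; }

    // Big-endian 4-byte store, like mach_write_to_4().
    static void write4(unsigned char* p, uint32_t v) {
      p[0] = (unsigned char)(v >> 24); p[1] = (unsigned char)(v >> 16);
      p[2] = (unsigned char)(v >> 8);  p[3] = (unsigned char)v;
    }

    static void init_for_writing(unsigned char* page, size_t page_size, Algo a) {
      uint32_t checksum = 0;
      switch (a) {
      case ALGO_CRC32:
        checksum = calc_crc32(page);
        write4(page, checksum);                  // header slot
        break;
      case ALGO_INNODB:
        checksum = calc_new(page);
        write4(page, checksum);                  // header slot: new formula
        checksum = calc_old(page);               // trailer gets the old formula
        break;
      case ALGO_NONE:
        checksum = 0xDEADBEEF;                   // BUF_NO_CHECKSUM_MAGIC stand-in
        write4(page, checksum);
        break;
      }
      write4(page + page_size - 8, checksum);    // trailer slot (first 4 of 8)
    }

    int main() {
      unsigned char page[16384] = {0};
      init_for_writing(page, sizeof page, ALGO_INNODB);
      std::printf("header byte 0: %02x, trailer byte 0: %02x\n",
                  page[0], page[sizeof page - 8]);
      return 0;
    }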
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index c6699296b36..ecefde7fd42 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -39,6 +39,16 @@ UNIV_INTERN dict_index_t* dict_ind_redundant;
/** dummy index for ROW_FORMAT=COMPACT supremum and infimum records */
UNIV_INTERN dict_index_t* dict_ind_compact;
+#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+/** Flag to control insert buffer debugging. */
+extern UNIV_INTERN uint ibuf_debug;
+#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
+
+/**********************************************************************
+Issue a warning that the row is too big. */
+void
+ib_warn_row_too_big(const dict_table_t* table);
+
#ifndef UNIV_HOTBACKUP
#include "buf0buf.h"
#include "data0type.h"
@@ -2410,11 +2420,18 @@ dict_index_add_to_cache(
new_index->n_fields = new_index->n_def;
new_index->trx_id = index->trx_id;
- if (strict && dict_index_too_big_for_tree(table, new_index)) {
+ if (dict_index_too_big_for_tree(table, new_index)) {
+
+ if (strict) {
too_big:
- dict_mem_index_free(new_index);
- dict_mem_index_free(index);
- return(DB_TOO_BIG_RECORD);
+ dict_mem_index_free(new_index);
+ dict_mem_index_free(index);
+ return(DB_TOO_BIG_RECORD);
+ } else {
+
+ ib_warn_row_too_big(table);
+
+ }
}
if (dict_index_is_univ(index)) {
@@ -5743,11 +5760,11 @@ dict_set_corrupted(
dict_index_copy_types(tuple, sys_index, 2);
- btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_GE,
+ btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_LE,
BTR_MODIFY_LEAF,
&cursor, 0, __FILE__, __LINE__, &mtr);
- if (cursor.up_match == dtuple_get_n_fields(tuple)) {
+ if (cursor.low_match == dtuple_get_n_fields(tuple)) {
/* UPDATE SYS_INDEXES SET TYPE=index->type
WHERE TABLE_ID=index->table->id AND INDEX_ID=index->id */
ulint len;
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index ba0476b1772..4175abdf895 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -55,6 +55,8 @@ Created 10/25/1995 Heikki Tuuri
static ulint srv_data_read, srv_data_written;
#endif /* !UNIV_HOTBACKUP */
+MYSQL_PLUGIN_IMPORT extern my_bool lower_case_file_system;
+
/*
IMPLEMENTATION OF THE TABLESPACE MEMORY CACHE
=============================================
@@ -1941,7 +1943,8 @@ UNIV_INTERN
ibool
fil_inc_pending_ops(
/*================*/
- ulint id) /*!< in: space id */
+ ulint id, /*!< in: space id */
+ ibool print_err) /*!< in: need to print error or not */
{
fil_space_t* space;
@@ -1950,10 +1953,12 @@ fil_inc_pending_ops(
space = fil_space_get_by_id(id);
if (space == NULL) {
- fprintf(stderr,
- "InnoDB: Error: trying to do an operation on a"
- " dropped tablespace %lu\n",
- (ulong) id);
+ if (print_err) {
+ fprintf(stderr,
+ "InnoDB: Error: trying to do an operation on a"
+ " dropped tablespace %lu\n",
+ (ulong) id);
+ }
}
if (space == NULL || space->stop_new_ops) {
@@ -4109,7 +4114,18 @@ fil_load_single_table_tablespace(
/* Build up the tablename in the standard form database/table. */
tablename = static_cast<char*>(
mem_alloc(dbname_len + filename_len + 2));
- sprintf(tablename, "%s/%s", dbname, filename);
+
+ /* When lower_case_table_names = 2 it is possible that the
+ dbname is in upper case, but while storing it in fil_space_t
+ we must convert it into lower case */
+ sprintf(tablename, "%s" , dbname);
+ tablename[dbname_len] = '\0';
+
+ if (lower_case_file_system) {
+ dict_casedn_str(tablename);
+ }
+
+ sprintf(tablename+dbname_len,"/%s",filename);
tablename_len = strlen(tablename) - strlen(".ibd");
tablename[tablename_len] = '\0';
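The hunk above lowercases only the database part of the "db/table" tablespace name when lower_case_file_system is set, then strips the ".ibd" suffix. A standalone sketch of that construction; plain ASCII tolower is used here, whereas the server goes through dict_casedn_str() and the filesystem charset.

    #include <cctype>
    #include <cstdio>
    #include <string>

    // Build the fil_space_t key "db/table", lowercasing only the db part.
    static std::string make_space_name(std::string db, const std::string& file,
                                       bool lower_case_file_system) {
      if (lower_case_file_system)
        for (char& c : db)
          c = (char)std::tolower((unsigned char)c);

      std::string name = db + "/" + file;
      const std::string suffix = ".ibd";
      if (name.size() > suffix.size())
        name.resize(name.size() - suffix.size());   // strip the ".ibd" extension
      return name;
    }

    int main() {
      std::printf("%s\n", make_space_name("TestDB", "t1.ibd", true).c_str());
      // prints "testdb/t1"
      return 0;
    }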
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index ae61b77c6de..e1b9c95c26e 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -36,6 +36,7 @@ Full Text Search interface
#include "dict0priv.h"
#include "dict0stats.h"
#include "btr0pcur.h"
+#include <vector>
#include "ha_prototypes.h"
@@ -899,12 +900,14 @@ fts_drop_index(
index_cache = fts_find_index_cache(cache, index);
- if (index_cache->words) {
- fts_words_free(index_cache->words);
- rbt_free(index_cache->words);
- }
+ if (index_cache != NULL) {
+ if (index_cache->words) {
+ fts_words_free(index_cache->words);
+ rbt_free(index_cache->words);
+ }
- ib_vector_remove(cache->indexes, *(void**) index_cache);
+ ib_vector_remove(cache->indexes, *(void**) index_cache);
+ }
if (cache->get_docs) {
fts_reset_get_doc(cache);
@@ -1255,7 +1258,8 @@ fts_tokenizer_word_get(
#endif
/* If it is a stopword, do not index it */
- if (rbt_search(cache->stopword_info.cached_stopword,
+ if (cache->stopword_info.cached_stopword != NULL
+ && rbt_search(cache->stopword_info.cached_stopword,
&parent, text) == 0) {
return(NULL);
@@ -3558,6 +3562,12 @@ fts_add_doc_by_id(
rw_lock_x_lock(&table->fts->cache->lock);
+ if (table->fts->cache->stopword_info.status
+ & STOPWORD_NOT_INIT) {
+ fts_load_stopword(table, NULL, NULL,
+ NULL, TRUE, TRUE);
+ }
+
fts_cache_add_doc(
table->fts->cache,
get_doc->index_cache,
@@ -6072,8 +6082,6 @@ fts_update_hex_format_flag(
return (err);
}
-#ifdef _WIN32
-
/*********************************************************************//**
Rename an aux table to HEX format. It's called when "%016llu" is used
to format an object id in table name, which only happens in Windows. */
@@ -6170,8 +6178,8 @@ This function should make sure that either all the parent table and aux tables
are set DICT_TF2_FTS_AUX_HEX_NAME with flags2 or none of them are set */
static __attribute__((nonnull, warn_unused_result))
dberr_t
-fts_rename_aux_tables_to_hex_format(
-/*================================*/
+fts_rename_aux_tables_to_hex_format_low(
+/*====================================*/
trx_t* trx, /*!< in: transaction */
dict_table_t* parent_table, /*!< in: parent table */
ib_vector_t* tables) /*!< in: aux tables to rename. */
@@ -6295,12 +6303,14 @@ fts_rename_aux_tables_to_hex_format(
"table %s. Please revert manually.",
table->name);
fts_sql_rollback(trx_bg);
+ trx_free_for_background(trx_bg);
/* Continue to clear aux tables' flags2 */
not_rename = true;
continue;
}
fts_sql_commit(trx_bg);
+ trx_free_for_background(trx_bg);
}
DICT_TF2_FLAG_UNSET(parent_table, DICT_TF2_FTS_AUX_HEX_NAME);
@@ -6324,7 +6334,11 @@ fts_fake_hex_to_dec(
ret = sprintf(tmp_id, UINT64PFx, id);
ut_ad(ret == 16);
+#ifdef _WIN32
ret = sscanf(tmp_id, "%016llu", &dec_id);
+#else
+ ret = sscanf(tmp_id, "%016"PRIu64, &dec_id);
+#endif /* _WIN32 */
ut_ad(ret == 1);
return dec_id;
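fts_fake_hex_to_dec() prints the id with the hex format and scans the same 16 characters back as a decimal number, which is only meaningful when every digit happens to be 0-9; the #ifdef above merely picks the right scanf length modifier per platform. A standalone sketch, approximating UINT64PFx with PRIx64:

    #include <cinttypes>
    #include <cstdio>

    // Reinterpret an id formatted as 16 hex digits as if those same
    // characters were decimal digits (as fts_fake_hex_to_dec() does).
    static uint64_t fake_hex_to_dec(uint64_t id) {
      char tmp[17];
      uint64_t dec = 0;
      std::snprintf(tmp, sizeof tmp, "%016" PRIx64, id);
      std::sscanf(tmp, "%016" SCNu64, &dec);
      return dec;
    }

    int main() {
      // 0x123 prints as "0000000000000123", which reads back as decimal 123.
      std::printf("%" PRIu64 "\n", fake_hex_to_dec(0x123));
      return 0;
    }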
@@ -6346,7 +6360,293 @@ fts_check_aux_table_parent_id_cmp(
return static_cast<int>(fa1->parent_id - fa2->parent_id);
}
-#endif /* _WIN32 */
+/** Mark all the fts indexes associated with the parent table as corrupted.
+@param[in] trx transaction
+@param[in, out] parent_table all fts indexes associated with this parent
+ table will be marked as corrupted. */
+static
+void
+fts_parent_all_index_set_corrupt(
+ trx_t* trx,
+ dict_table_t* parent_table)
+{
+ fts_t* fts = parent_table->fts;
+
+ if (trx_get_dict_operation(trx) == TRX_DICT_OP_NONE) {
+ trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
+ }
+
+ for (ulint j = 0; j < ib_vector_size(fts->indexes); j++) {
+ dict_index_t* index = static_cast<dict_index_t*>(
+ ib_vector_getp_const(fts->indexes, j));
+ dict_set_corrupted(index,
+ trx, "DROP ORPHANED TABLE");
+ }
+}
+
+/** Mark the fts index whose index id matches the given id as corrupted.
+@param[in] trx transaction
+@param[in] id index id to search for
+@param[in, out] table parent table whose fts indexes are
+ checked. */
+static
+void
+fts_set_index_corrupt(
+ trx_t* trx,
+ index_id_t id,
+ dict_table_t* table)
+{
+ fts_t* fts = table->fts;
+
+ if (trx_get_dict_operation(trx) == TRX_DICT_OP_NONE) {
+ trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
+ }
+
+ for (ulint j = 0; j < ib_vector_size(fts->indexes); j++) {
+ dict_index_t* index = static_cast<dict_index_t*>(
+ ib_vector_getp_const(fts->indexes, j));
+ if (index->id == id) {
+ dict_set_corrupted(index, trx,
+ "DROP ORPHANED TABLE");
+ break;
+ }
+ }
+}
+
+/** Check whether the index for the aux table is corrupted.
+@param[in] aux_table auxiliary table
+@retval nonzero if index is corrupted, zero for valid index */
+static
+ulint
+fts_check_corrupt_index(
+ fts_aux_table_t* aux_table)
+{
+ dict_table_t* table;
+ dict_index_t* index;
+ table = dict_table_open_on_id(
+ aux_table->parent_id, TRUE, DICT_TABLE_OP_NORMAL);
+
+ if (table == NULL) {
+ return(0);
+ }
+
+ for (index = UT_LIST_GET_FIRST(table->indexes);
+ index;
+ index = UT_LIST_GET_NEXT(indexes, index)) {
+ if (index->id == aux_table->index_id) {
+ ut_ad(index->type & DICT_FTS);
+ dict_table_close(table, true, false);
+ return(dict_index_is_corrupted(index));
+ }
+ }
+
+ dict_table_close(table, true, false);
+ return(0);
+}
+
+/** Check the validity of the parent table.
+@param[in] aux_table auxiliary table
+@return true if it is a valid table or false if it is not */
+static
+bool
+fts_valid_parent_table(
+ const fts_aux_table_t* aux_table)
+{
+ dict_table_t* parent_table;
+ bool valid = false;
+
+ parent_table = dict_table_open_on_id(
+ aux_table->parent_id, TRUE, DICT_TABLE_OP_NORMAL);
+
+ if (parent_table != NULL && parent_table->fts != NULL) {
+ if (aux_table->index_id == 0) {
+ valid = true;
+ } else {
+ index_id_t id = aux_table->index_id;
+ dict_index_t* index;
+
+ /* Search for the FT index in the table's list. */
+ for (index = UT_LIST_GET_FIRST(parent_table->indexes);
+ index;
+ index = UT_LIST_GET_NEXT(indexes, index)) {
+ if (index->id == id) {
+ valid = true;
+ break;
+ }
+
+ }
+ }
+ }
+
+ if (parent_table) {
+ dict_table_close(parent_table, TRUE, FALSE);
+ }
+
+ return(valid);
+}
+
+/** Try to rename all aux tables of the specified parent table.
+@param[in] aux_tables aux_tables to be renamed
+@param[in] parent_table parent table of all aux
+ tables stored in tables. */
+static
+void
+fts_rename_aux_tables_to_hex_format(
+ ib_vector_t* aux_tables,
+ dict_table_t* parent_table)
+{
+ dberr_t err;
+ trx_t* trx_rename = trx_allocate_for_background();
+ trx_rename->op_info = "Rename aux tables to hex format";
+ trx_rename->dict_operation_lock_mode = RW_X_LATCH;
+ trx_start_for_ddl(trx_rename, TRX_DICT_OP_TABLE);
+
+ err = fts_rename_aux_tables_to_hex_format_low(trx_rename,
+ parent_table, aux_tables);
+
+ trx_rename->dict_operation_lock_mode = 0;
+
+ if (err != DB_SUCCESS) {
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Rollback operations on all aux tables of table %s. "
+ "All the fts index associated with the table are "
+ "marked as corrupted. Please rebuild the "
+ "index again.", parent_table->name);
+ fts_sql_rollback(trx_rename);
+
+ /* Corrupting the fts index related to parent table. */
+ trx_t* trx_corrupt;
+ trx_corrupt = trx_allocate_for_background();
+ trx_corrupt->dict_operation_lock_mode = RW_X_LATCH;
+ trx_start_for_ddl(trx_corrupt, TRX_DICT_OP_TABLE);
+ fts_parent_all_index_set_corrupt(trx_corrupt, parent_table);
+ trx_corrupt->dict_operation_lock_mode = 0;
+ fts_sql_commit(trx_corrupt);
+ trx_free_for_background(trx_corrupt);
+ } else {
+ fts_sql_commit(trx_rename);
+ }
+
+ trx_free_for_background(trx_rename);
+ ib_vector_reset(aux_tables);
+}
+
+/** Set the hex format flag for the parent table.
+@param[in, out] parent_table parent table
+@param[in] trx transaction */
+static
+void
+fts_set_parent_hex_format_flag(
+ dict_table_t* parent_table,
+ trx_t* trx)
+{
+ if (!DICT_TF2_FLAG_IS_SET(parent_table,
+ DICT_TF2_FTS_AUX_HEX_NAME)) {
+ DBUG_EXECUTE_IF("parent_table_flag_fail",
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "Setting parent table %s to hex format "
+ "failed. Please try to restart the server "
+ "again, if it doesn't work, the system "
+ "tables might be corrupted.",
+ parent_table->name);
+ return;);
+
+ dberr_t err = fts_update_hex_format_flag(
+ trx, parent_table->id, true);
+
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "Setting parent table %s to hex format "
+ "failed. Please try to restart the server "
+ "again, if it doesn't work, the system "
+ "tables might be corrupted.",
+ parent_table->name);
+ } else {
+ DICT_TF2_FLAG_SET(
+ parent_table, DICT_TF2_FTS_AUX_HEX_NAME);
+ }
+ }
+}
+
+/** Drop the obsolete auxiliary tables.
+@param[in] tables tables to be dropped. */
+static
+void
+fts_drop_obsolete_aux_table_from_vector(
+ ib_vector_t* tables)
+{
+ dberr_t err;
+
+ for (ulint count = 0; count < ib_vector_size(tables);
+ ++count) {
+
+ fts_aux_table_t* aux_drop_table;
+ aux_drop_table = static_cast<fts_aux_table_t*>(
+ ib_vector_get(tables, count));
+ trx_t* trx_drop = trx_allocate_for_background();
+ trx_drop->op_info = "Drop obsolete aux tables";
+ trx_drop->dict_operation_lock_mode = RW_X_LATCH;
+ trx_start_for_ddl(trx_drop, TRX_DICT_OP_TABLE);
+
+ err = row_drop_table_for_mysql(
+ aux_drop_table->name, trx_drop, false, true);
+
+ trx_drop->dict_operation_lock_mode = 0;
+
+ if (err != DB_SUCCESS) {
+ /* We don't need to worry about the
+ failure, since server would try to
+ drop it on next restart, even if
+ the table was broken. */
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Failed to drop obsolete aux table '%s', which "
+ "is harmless. Will try to drop it on next "
+ "restart.", aux_drop_table->name);
+ fts_sql_rollback(trx_drop);
+ } else {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Dropped obsolete aux table '%s'.",
+ aux_drop_table->name);
+
+ fts_sql_commit(trx_drop);
+ }
+
+ trx_free_for_background(trx_drop);
+ }
+}
+
+/** Drop all the auxiliary tables present in the vector.
+@param[in] trx transaction
+@param[in] tables tables to be dropped */
+static
+void
+fts_drop_aux_table_from_vector(
+ trx_t* trx,
+ ib_vector_t* tables)
+{
+ for (ulint count = 0; count < ib_vector_size(tables);
+ ++count) {
+ fts_aux_table_t* aux_drop_table;
+ aux_drop_table = static_cast<fts_aux_table_t*>(
+ ib_vector_get(tables, count));
+
+ /* Check for the validity of the parent table */
+ if (!fts_valid_parent_table(aux_drop_table)) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Parent table of FTS auxiliary table %s not "
+ "found.", aux_drop_table->name);
+ dberr_t err = fts_drop_table(trx, aux_drop_table->name);
+ if (err == DB_FAIL) {
+ char* path = fil_make_ibd_name(
+ aux_drop_table->name, false);
+ os_file_delete_if_exists(innodb_file_data_key,
+ path);
+ mem_free(path);
+ }
+ }
+ }
+}
/**********************************************************************//**
Check and drop all orphaned FTS auxiliary tables, those that don't have
@@ -6359,9 +6659,12 @@ fts_check_and_drop_orphaned_tables(
trx_t* trx, /*!< in: transaction */
ib_vector_t* tables) /*!< in: tables to check */
{
-#ifdef _WIN32
mem_heap_t* heap;
ib_vector_t* aux_tables_to_rename;
+ ib_vector_t* invalid_aux_tables;
+ ib_vector_t* valid_aux_tables;
+ ib_vector_t* drop_aux_tables;
+ ib_vector_t* obsolete_aux_tables;
ib_alloc_t* heap_alloc;
heap = mem_heap_create(1024);
@@ -6372,38 +6675,99 @@ fts_check_and_drop_orphaned_tables(
aux_tables_to_rename = ib_vector_create(heap_alloc,
sizeof(fts_aux_table_t), 128);
+ /* We store all fake auxiliary table and orphaned table here. */
+ invalid_aux_tables = ib_vector_create(heap_alloc,
+ sizeof(fts_aux_table_t), 128);
+
+ /* We store all valid aux tables. We use this to filter the
+ fake auxiliary table from invalid auxiliary tables. */
+ valid_aux_tables = ib_vector_create(heap_alloc,
+ sizeof(fts_aux_table_t), 128);
+
+ /* We store all auxiliary tables to be dropped. */
+ drop_aux_tables = ib_vector_create(heap_alloc,
+ sizeof(fts_aux_table_t), 128);
+
+ /* We store all obsolete auxiliary tables to be dropped. */
+ obsolete_aux_tables = ib_vector_create(heap_alloc,
+ sizeof(fts_aux_table_t), 128);
+
/* Sort by parent_id first, in case rename will fail */
ib_vector_sort(tables, fts_check_aux_table_parent_id_cmp);
-#endif /* _WIN32 */
for (ulint i = 0; i < ib_vector_size(tables); ++i) {
dict_table_t* parent_table;
fts_aux_table_t* aux_table;
bool drop = false;
-#ifdef _WIN32
dict_table_t* table;
fts_aux_table_t* next_aux_table = NULL;
ib_id_t orig_parent_id = 0;
+ ib_id_t orig_index_id = 0;
bool rename = false;
-#endif /* _WIN32 */
aux_table = static_cast<fts_aux_table_t*>(
ib_vector_get(tables, i));
-#ifdef _WIN32
table = dict_table_open_on_id(
aux_table->id, TRUE, DICT_TABLE_OP_NORMAL);
orig_parent_id = aux_table->parent_id;
+ orig_index_id = aux_table->index_id;
if (table == NULL || strcmp(table->name, aux_table->name)) {
- /* Skip these aux tables, which are common tables
- with wrong table ids */
- if (table) {
+
+ bool fake_aux = false;
+
+ if (table != NULL) {
dict_table_close(table, TRUE, FALSE);
}
- continue;
+ if (i + 1 < ib_vector_size(tables)) {
+ next_aux_table = static_cast<fts_aux_table_t*>(
+ ib_vector_get(tables, i + 1));
+ }
+
+ /* Determine whether the aux table is a fake fts
+ table or an orphan fts table. */
+ for (ulint count = 0;
+ count < ib_vector_size(valid_aux_tables);
+ count++) {
+ fts_aux_table_t* valid_aux;
+ valid_aux = static_cast<fts_aux_table_t*>(
+ ib_vector_get(valid_aux_tables, count));
+ if (strcmp(valid_aux->name,
+ aux_table->name) == 0) {
+ fake_aux = true;
+ break;
+ }
+ }
+ /* All aux tables of the parent table whose id is
+ orig_parent_id have been checked; try to rename
+ them if necessary. */
+ if ((next_aux_table == NULL
+ || orig_parent_id != next_aux_table->parent_id)
+ && (!ib_vector_is_empty(aux_tables_to_rename))) {
+
+ ulint parent_id = fts_fake_hex_to_dec(
+ aux_table->parent_id);
+
+ parent_table = dict_table_open_on_id(
+ parent_id, TRUE,
+ DICT_TABLE_OP_NORMAL);
+
+ fts_rename_aux_tables_to_hex_format(
+ aux_tables_to_rename, parent_table);
+
+ dict_table_close(parent_table, TRUE,
+ FALSE);
+ }
+
+ /* If the aux table is a fake aux table, skip it. */
+ if (!fake_aux) {
+ ib_vector_push(invalid_aux_tables, aux_table);
+ }
+
+ continue;
} else if (!DICT_TF2_FLAG_IS_SET(table,
DICT_TF2_FTS_AUX_HEX_NAME)) {
@@ -6416,65 +6780,99 @@ fts_check_and_drop_orphaned_tables(
}
ut_ad(aux_table->id > aux_table->parent_id);
- rename = true;
- }
- if (table) {
- dict_table_close(table, TRUE, FALSE);
- }
-#endif /* _WIN32 */
+ /* Check whether parent table id and index id
+ are stored as decimal format. */
+ if (fts_valid_parent_table(aux_table)) {
- parent_table = dict_table_open_on_id(
- aux_table->parent_id, TRUE, DICT_TABLE_OP_NORMAL);
+ parent_table = dict_table_open_on_id(
+ aux_table->parent_id, true,
+ DICT_TABLE_OP_NORMAL);
- if (parent_table == NULL || parent_table->fts == NULL) {
+ ut_ad(parent_table != NULL);
+ ut_ad(parent_table->fts != NULL);
- drop = true;
-
- } else if (aux_table->index_id != 0) {
- index_id_t id;
- fts_t* fts;
+ if (!DICT_TF2_FLAG_IS_SET(
+ parent_table,
+ DICT_TF2_FTS_AUX_HEX_NAME)) {
+ rename = true;
+ }
- drop = true;
- fts = parent_table->fts;
- id = aux_table->index_id;
+ dict_table_close(parent_table, TRUE, FALSE);
+ }
- /* Search for the FT index in the table's list. */
- for (ulint j = 0;
- j < ib_vector_size(fts->indexes);
- ++j) {
+ if (!rename) {
+ /* Reassign the original value of
+ aux table if it is not in decimal format */
+ aux_table->parent_id = orig_parent_id;
+ aux_table->index_id = orig_index_id;
+ }
+ }
- const dict_index_t* index;
+ if (table != NULL) {
+ dict_table_close(table, true, false);
+ }
- index = static_cast<const dict_index_t*>(
- ib_vector_getp_const(fts->indexes, j));
+ if (!rename) {
+ /* Check the validity of the parent table. */
+ if (!fts_valid_parent_table(aux_table)) {
+ drop = true;
+ }
+ }
- if (index->id == id) {
- drop = false;
- break;
- }
+ /* Filter out the fake aux table by comparing with the
+ current valid auxiliary table name. */
+ for (ulint count = 0;
+ count < ib_vector_size(invalid_aux_tables); count++) {
+ fts_aux_table_t* invalid_aux;
+ invalid_aux = static_cast<fts_aux_table_t*>(
+ ib_vector_get(invalid_aux_tables, count));
+ if (strcmp(invalid_aux->name, aux_table->name) == 0) {
+ ib_vector_remove(
+ invalid_aux_tables,
+ *reinterpret_cast<void**>(invalid_aux));
+ break;
}
}
- if (drop) {
+ ib_vector_push(valid_aux_tables, aux_table);
- ib_logf(IB_LOG_LEVEL_WARN,
- "Parent table of FTS auxiliary table %s not "
- "found.", aux_table->name);
+ /* If the index associated with the aux table is corrupted,
+ skip it. */
+ if (fts_check_corrupt_index(aux_table) > 0) {
- dberr_t err = fts_drop_table(trx, aux_table->name);
+ if (i + 1 < ib_vector_size(tables)) {
+ next_aux_table = static_cast<fts_aux_table_t*>(
+ ib_vector_get(tables, i + 1));
+ }
- if (err == DB_FAIL) {
- char* path;
+ if (next_aux_table == NULL
+ || orig_parent_id != next_aux_table->parent_id) {
- path = fil_make_ibd_name(
- aux_table->name, false);
+ parent_table = dict_table_open_on_id(
+ aux_table->parent_id, TRUE,
+ DICT_TABLE_OP_NORMAL);
- os_file_delete_if_exists(innodb_file_data_key,
- path);
+ if (!ib_vector_is_empty(aux_tables_to_rename)) {
+ fts_rename_aux_tables_to_hex_format(
+ aux_tables_to_rename, parent_table);
- mem_free(path);
+ } else {
+ fts_set_parent_hex_format_flag(
+ parent_table, trx);
+ }
+
+ dict_table_close(parent_table, TRUE, FALSE);
}
+
+ continue;
+ }
+
+ parent_table = dict_table_open_on_id(
+ aux_table->parent_id, TRUE, DICT_TABLE_OP_NORMAL);
+
+ if (drop) {
+ ib_vector_push(drop_aux_tables, aux_table);
} else {
if (FTS_IS_OBSOLETE_AUX_TABLE(aux_table->name)) {
@@ -6484,49 +6882,13 @@ fts_check_and_drop_orphaned_tables(
This could happen when we try to upgrade
from older server to later one, which doesn't
contain these obsolete tables. */
- drop = true;
-
- dberr_t err;
- trx_t* trx_drop =
- trx_allocate_for_background();
-
- trx_drop->op_info = "Drop obsolete aux tables";
- trx_drop->dict_operation_lock_mode = RW_X_LATCH;
-
- trx_start_for_ddl(trx_drop, TRX_DICT_OP_TABLE);
-
- err = row_drop_table_for_mysql(
- aux_table->name, trx_drop, false, true);
-
- trx_drop->dict_operation_lock_mode = 0;
-
- if (err != DB_SUCCESS) {
- /* We don't need to worry about the
- failure, since server would try to
- drop it on next restart, even if
- the table was broken. */
-
- ib_logf(IB_LOG_LEVEL_WARN,
- "Fail to drop obsolete aux"
- " table '%s', which is"
- " harmless. will try to drop"
- " it on next restart.",
- aux_table->name);
-
- fts_sql_rollback(trx_drop);
- } else {
- ib_logf(IB_LOG_LEVEL_INFO,
- "Dropped obsolete aux"
- " table '%s'.",
- aux_table->name);
-
- fts_sql_commit(trx_drop);
- }
-
- trx_free_for_background(trx_drop);
+ ib_vector_push(obsolete_aux_tables, aux_table);
+ continue;
}
}
-#ifdef _WIN32
+
+ /* If the aux table is in decimal format, we should
+ rename it, so push it to aux_tables_to_rename */
if (!drop && rename) {
ib_vector_push(aux_tables_to_rename, aux_table);
}
@@ -6544,38 +6906,16 @@ fts_check_and_drop_orphaned_tables(
them if necessary. We had better use a new background
trx to rename rather than the original trx, in case
any failure would cause a complete rollback. */
- dberr_t err;
- trx_t* trx_rename = trx_allocate_for_background();
- trx_rename->op_info = "Rename aux tables to "
- "hex format";
- trx_rename->dict_operation_lock_mode = RW_X_LATCH;
- trx_start_for_ddl(trx_rename, TRX_DICT_OP_TABLE);
+ ut_ad(rename);
+ ut_ad(!DICT_TF2_FLAG_IS_SET(
+ parent_table, DICT_TF2_FTS_AUX_HEX_NAME));
- err = fts_rename_aux_tables_to_hex_format(trx_rename,
- parent_table, aux_tables_to_rename);
-
- trx_rename->dict_operation_lock_mode = 0;
-
- if (err != DB_SUCCESS) {
- ib_logf(IB_LOG_LEVEL_WARN,
- "Rollback operations on all "
- "aux tables of table %s. "
- "Please check why renaming aux tables "
- "failed, and restart the server to "
- "upgrade again to "
- "get the table work.",
- parent_table->name);
-
- fts_sql_rollback(trx_rename);
- } else {
- fts_sql_commit(trx_rename);
- }
-
- trx_free_for_background(trx_rename);
- ib_vector_reset(aux_tables_to_rename);
+ fts_rename_aux_tables_to_hex_format(
+			aux_tables_to_rename, parent_table);
}
-#else /* _WIN32 */
- if (!drop) {
+
+ /* The IDs are already in correct hex format. */
+ if (!drop && !rename) {
dict_table_t* table;
table = dict_table_open_on_id(
@@ -6590,6 +6930,16 @@ fts_check_and_drop_orphaned_tables(
&& !DICT_TF2_FLAG_IS_SET(
table,
DICT_TF2_FTS_AUX_HEX_NAME)) {
+
+ DBUG_EXECUTE_IF("aux_table_flag_fail",
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Setting aux table %s to hex "
+ "format failed.", table->name);
+ fts_set_index_corrupt(
+ trx, aux_table->index_id,
+ parent_table);
+ goto table_exit;);
+
dberr_t err = fts_update_hex_format_flag(
trx, table->id, true);
@@ -6597,49 +6947,44 @@ fts_check_and_drop_orphaned_tables(
ib_logf(IB_LOG_LEVEL_WARN,
"Setting aux table %s to hex "
"format failed.", table->name);
+
+ fts_set_index_corrupt(
+ trx, aux_table->index_id,
+ parent_table);
} else {
DICT_TF2_FLAG_SET(table,
DICT_TF2_FTS_AUX_HEX_NAME);
}
}
+#ifndef DBUG_OFF
+table_exit:
+#endif /* !DBUG_OFF */
if (table != NULL) {
dict_table_close(table, TRUE, FALSE);
}
ut_ad(parent_table != NULL);
- if (!DICT_TF2_FLAG_IS_SET(parent_table,
- DICT_TF2_FTS_AUX_HEX_NAME)) {
- dberr_t err = fts_update_hex_format_flag(
- trx, parent_table->id, true);
- if (err != DB_SUCCESS) {
- ib_logf(IB_LOG_LEVEL_WARN,
- "Setting parent table %s of "
- "FTS auxiliary %s to hex "
- "format failed.",
- parent_table->name,
- aux_table->name);
- } else {
- DICT_TF2_FLAG_SET(parent_table,
- DICT_TF2_FTS_AUX_HEX_NAME);
- }
- }
+ fts_set_parent_hex_format_flag(
+ parent_table, trx);
}
-#endif /* _WIN32 */
-
- if (parent_table) {
+ if (parent_table != NULL) {
dict_table_close(parent_table, TRUE, FALSE);
}
}
-#ifdef _WIN32
+ fts_drop_aux_table_from_vector(trx, invalid_aux_tables);
+ fts_drop_aux_table_from_vector(trx, drop_aux_tables);
+ fts_sql_commit(trx);
+
+ fts_drop_obsolete_aux_table_from_vector(obsolete_aux_tables);
+
/* Free the memory allocated at the beginning */
if (heap != NULL) {
mem_heap_free(heap);
}
-#endif /* _WIN32 */
}
/**********************************************************************//**
@@ -6738,7 +7083,6 @@ fts_drop_orphaned_tables(void)
if (error == DB_SUCCESS) {
fts_check_and_drop_orphaned_tables(trx, tables);
- fts_sql_commit(trx);
break; /* Exit the loop. */
} else {
ib_vector_reset(tables);
diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc
index 910a00cd521..2e2bd061d07 100644
--- a/storage/innobase/fts/fts0opt.cc
+++ b/storage/innobase/fts/fts0opt.cc
@@ -190,6 +190,8 @@ cycle for a table. */
struct fts_slot_t {
dict_table_t* table; /*!< Table to optimize */
+ table_id_t table_id; /*!< Table id */
+
fts_state_t state; /*!< State of this slot */
ulint added; /*!< Number of doc ids added since the
@@ -2575,6 +2577,8 @@ fts_optimize_add_table(
return;
}
+ ut_ad(table->cached && table->fts != NULL);
+
/* Make sure table with FTS index cannot be evicted */
if (table->can_be_evicted) {
dict_table_move_from_lru_to_non_lru(table);
@@ -2741,6 +2745,7 @@ fts_optimize_new_table(
memset(slot, 0x0, sizeof(*slot));
slot->table = table;
+ slot->table_id = table->id;
slot->state = FTS_STATE_LOADED;
slot->interval_time = FTS_OPTIMIZE_INTERVAL_IN_SECS;
@@ -2865,7 +2870,8 @@ fts_is_sync_needed(
slot = static_cast<const fts_slot_t*>(
ib_vector_get_const(tables, i));
- if (slot->table && slot->table->fts) {
+ if (slot->state != FTS_STATE_EMPTY && slot->table
+ && slot->table->fts) {
total_memory += slot->table->fts->cache->total_size;
}
@@ -2948,6 +2954,7 @@ fts_optimize_thread(
ib_wqueue_t* wq = (ib_wqueue_t*) arg;
ut_ad(!srv_read_only_mode);
+ my_thread_init();
heap = mem_heap_create(sizeof(dict_table_t*) * 64);
heap_alloc = ib_heap_allocator_create(heap);
@@ -3076,9 +3083,11 @@ fts_optimize_thread(
if (slot->state != FTS_STATE_EMPTY) {
dict_table_t* table = NULL;
- table = dict_table_open_on_name(
- slot->table->name, FALSE, FALSE,
- DICT_ERR_IGNORE_INDEX_ROOT);
+			/* slot->table may be freed, so we try to open the
+			table by slot->table_id. */
+ table = dict_table_open_on_id(
+ slot->table_id, FALSE,
+ DICT_TABLE_OP_NORMAL);
if (table) {
@@ -3101,6 +3110,7 @@ fts_optimize_thread(
ib_logf(IB_LOG_LEVEL_INFO, "FTS optimize thread exiting.");
os_event_set(exit_event);
+ my_thread_end();
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index d2077a5352f..2f7a953b939 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -2962,7 +2962,8 @@ innobase_init(
innobase_hton->flush_logs = innobase_flush_logs;
innobase_hton->show_status = innobase_show_status;
- innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS;
+ innobase_hton->flags =
+ HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS;
innobase_hton->release_temporary_latches =
innobase_release_temporary_latches;
@@ -13533,6 +13534,7 @@ ha_innobase::start_stmt(
thr_lock_type lock_type)
{
trx_t* trx;
+ DBUG_ENTER("ha_innobase::start_stmt");
update_thd(thd);
@@ -13556,6 +13558,29 @@ ha_innobase::start_stmt(
prebuilt->hint_need_to_fetch_extra_cols = 0;
reset_template();
+ if (dict_table_is_temporary(prebuilt->table)
+ && prebuilt->mysql_has_locked
+ && prebuilt->select_lock_type == LOCK_NONE) {
+ dberr_t error;
+
+ switch (thd_sql_command(thd)) {
+ case SQLCOM_INSERT:
+ case SQLCOM_UPDATE:
+ case SQLCOM_DELETE:
+ init_table_handle_for_HANDLER();
+ prebuilt->select_lock_type = LOCK_X;
+ prebuilt->stored_select_lock_type = LOCK_X;
+ error = row_lock_table_for_mysql(prebuilt, NULL, 1);
+
+ if (error != DB_SUCCESS) {
+ int st = convert_error_code_to_mysql(
+ error, 0, thd);
+ DBUG_RETURN(st);
+ }
+ break;
+ }
+ }
+
if (!prebuilt->mysql_has_locked) {
/* This handle is for a temporary table created inside
this same LOCK TABLES; since MySQL does NOT call external_lock
@@ -13593,7 +13618,7 @@ ha_innobase::start_stmt(
++trx->will_lock;
}
- return(0);
+ DBUG_RETURN(0);
}
/******************************************************************//**
@@ -19035,3 +19060,27 @@ innobase_convert_to_system_charset(
static_cast<uint>(len), errors));
}
+/**********************************************************************
+Issue a warning that the row is too big. */
+void
+ib_warn_row_too_big(const dict_table_t* table)
+{
+ /* If prefix is true then a 768-byte prefix is stored
+ locally for BLOB fields. Refer to dict_table_get_format() */
+ const bool prefix = (dict_tf_get_format(table->flags)
+ == UNIV_FORMAT_A);
+
+ const ulint free_space = page_get_free_space_of_empty(
+ table->flags & DICT_TF_COMPACT) / 2;
+
+ THD* thd = current_thd;
+
+ push_warning_printf(
+ thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TO_BIG_ROW,
+ "Row size too large (> %lu). Changing some columns to TEXT"
+ " or BLOB %smay help. In current row format, BLOB prefix of"
+ " %d bytes is stored inline.", free_space
+ , prefix ? "or using ROW_FORMAT=DYNAMIC or"
+ " ROW_FORMAT=COMPRESSED ": ""
+ , prefix ? DICT_MAX_FIXED_COL_LEN : 0);
+}
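
The warning added above reports a limit of roughly half the free space of an empty index page, since a B-tree page must be able to hold at least two records. A minimal stand-alone sketch of the same arithmetic (not part of the patch; it assumes the default 16 KiB page size and approximates page_get_free_space_of_empty() with a fixed overhead):

/* Sketch only: approximates the limit printed by ib_warn_row_too_big(). */
#include <cstdio>

int main()
{
	const unsigned long page_size  = 16384;	/* default UNIV_PAGE_SIZE */
	const unsigned long overhead   = 132;	/* approximate header/trailer/directory cost */
	const unsigned long free_space = (page_size - overhead) / 2;

	printf("Row size too large (> %lu).\n", free_space);	/* roughly 8126 on a stock build */
	return 0;
}
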
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 728c3bb2cf5..e24e82ca472 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -3366,9 +3366,7 @@ ha_innobase::prepare_inplace_alter_table(
ulint fts_doc_col_no = ULINT_UNDEFINED;
bool add_fts_doc_id = false;
bool add_fts_doc_id_idx = false;
-#ifdef _WIN32
bool add_fts_idx = false;
-#endif /* _WIN32 */
DBUG_ENTER("prepare_inplace_alter_table");
DBUG_ASSERT(!ha_alter_info->handler_ctx);
@@ -3513,9 +3511,7 @@ check_if_ok_to_rename:
& ~(HA_FULLTEXT
| HA_PACK_KEY
| HA_BINARY_PACK_KEY)));
-#ifdef _WIN32
add_fts_idx = true;
-#endif /* _WIN32 */
continue;
}
@@ -3526,19 +3522,16 @@ check_if_ok_to_rename:
}
}
-#ifdef _WIN32
/* We won't be allowed to add fts index to a table with
fts indexes already but without AUX_HEX_NAME set.
This means the aux tables of the table failed to
rename to hex format but new created aux tables
- shall be in hex format, which is contradictory.
- It's only for Windows. */
+ shall be in hex format, which is contradictory. */
if (!DICT_TF2_FLAG_IS_SET(indexed_table, DICT_TF2_FTS_AUX_HEX_NAME)
&& indexed_table->fts != NULL && add_fts_idx) {
my_error(ER_INNODB_FT_AUX_NOT_HEX_ID, MYF(0));
goto err_exit_no_heap;
}
-#endif /* _WIN32 */
/* Check existing index definitions for too-long column
prefixes as well, in case max_col_len shrunk. */
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 0bb898dbce5..2082c0d194b 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -4614,7 +4614,7 @@ ibuf_merge_or_delete_for_page(
function. When the counter is > 0, that prevents tablespace
from being dropped. */
- tablespace_being_deleted = fil_inc_pending_ops(space);
+ tablespace_being_deleted = fil_inc_pending_ops(space, true);
if (UNIV_UNLIKELY(tablespace_being_deleted)) {
/* Do not try to read the bitmap page from space;
diff --git a/storage/innobase/include/btr0cur.ic b/storage/innobase/include/btr0cur.ic
index 080866c7465..43ee3304c0e 100644
--- a/storage/innobase/include/btr0cur.ic
+++ b/storage/innobase/include/btr0cur.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -28,7 +28,7 @@ Created 10/16/1994 Heikki Tuuri
#ifdef UNIV_DEBUG
# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)\
-if (btr_cur_limit_optimistic_insert_debug\
+if (btr_cur_limit_optimistic_insert_debug > 1\
&& (NREC) >= (ulint)btr_cur_limit_optimistic_insert_debug) {\
CODE;\
}
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 31ec6b9ef8b..3097015999c 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -96,9 +96,6 @@ extern buf_block_t* back_block1; /*!< first block, for --apply-log */
extern buf_block_t* back_block2; /*!< second block, for page reorganize */
#endif /* !UNIV_HOTBACKUP */
-/** Magic value to use instead of checksums when they are disabled */
-#define BUF_NO_CHECKSUM_MAGIC 0xDEADBEEFUL
-
/** @brief States of a control block
@see buf_page_t
diff --git a/storage/innobase/include/buf0checksum.h b/storage/innobase/include/buf0checksum.h
index cd21781dc6e..6818345f965 100644
--- a/storage/innobase/include/buf0checksum.h
+++ b/storage/innobase/include/buf0checksum.h
@@ -28,11 +28,10 @@ Created Aug 11, 2011 Vasil Dimov
#include "univ.i"
-#ifndef UNIV_INNOCHECKSUM
-
#include "buf0types.h"
-#endif /* !UNIV_INNOCHECKSUM */
+/** Magic value to use instead of checksums when they are disabled */
+#define BUF_NO_CHECKSUM_MAGIC 0xDEADBEEFUL
/********************************************************************//**
Calculates a page CRC32 which is stored to the page when it is written
@@ -70,8 +69,6 @@ buf_calc_page_old_checksum(
/*=======================*/
const byte* page); /*!< in: buffer page */
-#ifndef UNIV_INNOCHECKSUM
-
/********************************************************************//**
Return a printable string describing the checksum algorithm.
@return algorithm name */
@@ -83,6 +80,4 @@ buf_checksum_algorithm_name(
extern ulong srv_checksum_algorithm;
-#endif /* !UNIV_INNOCHECKSUM */
-
#endif /* buf0checksum_h */
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 026187b2000..d2514ea78b6 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -85,7 +85,7 @@ dict_get_referenced_table(
mem_heap_t* heap); /*!< in: heap memory */
/*********************************************************************//**
Frees a foreign key struct. */
-UNIV_INTERN
+
void
dict_foreign_free(
/*==============*/
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index 798423eeddd..da2ee1c5730 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -65,11 +65,16 @@ of the address is FIL_NULL, the address is considered undefined. */
typedef byte fil_faddr_t; /*!< 'type' definition in C: an address
stored in a file page is a string of bytes */
+
+#endif /* !UNIV_INNOCHECKSUM */
+
#define FIL_ADDR_PAGE 0 /* first in address is the page offset */
#define FIL_ADDR_BYTE 4 /* then comes 2-byte byte offset within page*/
#define FIL_ADDR_SIZE 6 /* address size is 6 bytes */
+#ifndef UNIV_INNOCHECKSUM
+
/** File space address */
struct fil_addr_t{
ulint page; /*!< page number within a space */
@@ -140,8 +145,6 @@ extern fil_addr_t fil_addr_null;
#define FIL_PAGE_DATA_END 8 /*!< size of the page trailer */
/* @} */
-#ifndef UNIV_INNOCHECKSUM
-
/** File page types (values of FIL_PAGE_TYPE) @{ */
#define FIL_PAGE_INDEX 17855 /*!< B-tree node */
#define FIL_PAGE_UNDO_LOG 2 /*!< Undo log page */
@@ -166,6 +169,8 @@ extern fil_addr_t fil_addr_null;
#define FIL_LOG 502 /*!< redo log */
/* @} */
+#ifndef UNIV_INNOCHECKSUM
+
/** The number of fsyncs done to the log */
extern ulint fil_n_log_flushes;
@@ -585,7 +590,8 @@ UNIV_INTERN
ibool
fil_inc_pending_ops(
/*================*/
- ulint id); /*!< in: space id */
+ ulint id, /*!< in: space id */
+ ibool print_err); /*!< in: need to print error or not */
/*******************************************************************//**
Decrements the count of pending operations. */
UNIV_INTERN
diff --git a/storage/innobase/include/fts0priv.ic b/storage/innobase/include/fts0priv.ic
index 8ef877f267e..2d07c60f980 100644
--- a/storage/innobase/include/fts0priv.ic
+++ b/storage/innobase/include/fts0priv.ic
@@ -37,18 +37,38 @@ fts_write_object_id(
/* in: true for fixed hex format,
false for old ambiguous format */
{
+
#ifdef _WIN32
- /* Use this to construct old(5.6.14 and 5.7.3) ambiguous
- aux table names */
+
+ DBUG_EXECUTE_IF("innodb_test_wrong_non_windows_fts_aux_table_name",
+ return(sprintf(str, UINT64PFx, id)););
+
+	/* Use this to construct old (5.6.14 and 5.7.3) Windows
+	ambiguous aux table names */
DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name",
return(sprintf(str, "%016llu", id)););
+#else /* _WIN32 */
+
+	/* Use this to construct old (5.6.14 and 5.7.3) Windows
+	ambiguous aux table names */
+ DBUG_EXECUTE_IF("innodb_test_wrong_windows_fts_aux_table_name",
+ return(sprintf(str, "%016"PRIu64, id)););
+
+ DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name",
+ return(sprintf(str, UINT64PFx, id)););
+
+#endif /* _WIN32 */
+
/* As above, but this is only for those tables failing to rename. */
if (!hex_format) {
+#ifdef _WIN32
// FIXME: Use ut_snprintf(), so does following one.
return(sprintf(str, "%016llu", id));
- }
+#else /* _WIN32 */
+ return(sprintf(str, "%016"PRIu64, id));
#endif /* _WIN32 */
+ }
return(sprintf(str, UINT64PFx, id));
}
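
To make the hex/decimal distinction above concrete, the following stand-alone sketch (not part of the patch) prints the same object id in the old ambiguous zero-padded decimal form and in the fixed hex form; it assumes UINT64PFx expands to a 16-digit, zero-padded hexadecimal conversion as defined in univ.i:

#include <cstdio>
#include <cinttypes>

int main()
{
	const uint64_t	id = 1234;
	char		decimal_name[32];
	char		hex_name[32];

	sprintf(decimal_name, "%016" PRIu64, id);	/* old format: 0000000000001234 */
	sprintf(hex_name, "%016" PRIx64, id);		/* hex format: 00000000000004d2 */

	printf("decimal: %s\nhex:     %s\n", decimal_name, hex_name);
	return 0;
}
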
diff --git a/storage/innobase/include/mach0data.ic b/storage/innobase/include/mach0data.ic
index 7449d2da2b8..c46fcec107e 100644
--- a/storage/innobase/include/mach0data.ic
+++ b/storage/innobase/include/mach0data.ic
@@ -73,6 +73,8 @@ mach_write_to_2(
b[1] = (byte)(n);
}
+#endif /* !UNIV_INNOCHECKSUM */
+
/********************************************************//**
The following function is used to fetch data from 2 consecutive
bytes. The most significant byte is at the lowest address.
@@ -86,6 +88,8 @@ mach_read_from_2(
return(((ulint)(b[0]) << 8) | (ulint)(b[1]));
}
+#ifndef UNIV_INNOCHECKSUM
+
/********************************************************//**
The following function is used to convert a 16-bit data item
to the canonical format, for fast bytewise equality test
@@ -295,6 +299,8 @@ mach_write_to_8(
mach_write_to_4(static_cast<byte*>(b) + 4, (ulint) n);
}
+#endif /* !UNIV_INNOCHECKSUM */
+
/********************************************************//**
The following function is used to fetch data from 8 consecutive
bytes. The most significant byte is at the lowest address.
@@ -313,6 +319,8 @@ mach_read_from_8(
return(ull);
}
+#ifndef UNIV_INNOCHECKSUM
+
/*******************************************************//**
The following function is used to store data in 7 consecutive
bytes. We store the most significant byte to the lowest address. */
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index 5fea0023220..74d3c6bbc7c 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -139,6 +139,7 @@ enum os_file_create_t {
/* @} */
/** Error codes from os_file_get_last_error @{ */
+#define OS_FILE_NAME_TOO_LONG 36
#define OS_FILE_NOT_FOUND 71
#define OS_FILE_DISK_FULL 72
#define OS_FILE_ALREADY_EXISTS 73
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index b572f7abb49..cb6633bb941 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -29,6 +29,7 @@ Created 2/2/1994 Heikki Tuuri
#include "univ.i"
#include "page0types.h"
+#ifndef UNIV_INNOCHECKSUM
#include "fil0fil.h"
#include "buf0buf.h"
#include "data0data.h"
@@ -42,6 +43,8 @@ Created 2/2/1994 Heikki Tuuri
#define UNIV_INLINE
#endif
+#endif /* !UNIV_INNOCHECKSUM */
+
/* PAGE HEADER
===========
@@ -117,6 +120,8 @@ typedef byte page_header_t;
a new-style compact page */
/*-----------------------------*/
+#ifndef UNIV_INNOCHECKSUM
+
/* Heap numbers */
#define PAGE_HEAP_NO_INFIMUM 0 /* page infimum */
#define PAGE_HEAP_NO_SUPREMUM 1 /* page supremum */
@@ -343,6 +348,7 @@ page_cmp_dtuple_rec_with_match(
matched; when function returns contains the
value for current comparison */
#endif /* !UNIV_HOTBACKUP */
+#endif /* !UNIV_INNOCHECKSUM */
/*************************************************************//**
Gets the page number.
@return page number */
@@ -351,6 +357,7 @@ ulint
page_get_page_no(
/*=============*/
const page_t* page); /*!< in: page */
+#ifndef UNIV_INNOCHECKSUM
/*************************************************************//**
Gets the tablespace identifier.
@return space id */
@@ -359,6 +366,7 @@ ulint
page_get_space_id(
/*==============*/
const page_t* page); /*!< in: page */
+#endif /* !UNIV_INNOCHECKSUM */
/*************************************************************//**
Gets the number of user records on page (the infimum and supremum records
are not user records).
@@ -368,6 +376,7 @@ ulint
page_get_n_recs(
/*============*/
const page_t* page); /*!< in: index page */
+#ifndef UNIV_INNOCHECKSUM
/***************************************************************//**
Returns the number of records before the given record in chain.
The number includes infimum and supremum records.
@@ -516,6 +525,7 @@ ulint
page_rec_get_heap_no(
/*=================*/
const rec_t* rec); /*!< in: the physical record */
+#endif /* !UNIV_INNOCHECKSUM */
/************************************************************//**
Determine whether the page is a B-tree leaf.
@return true if the page is a B-tree leaf (PAGE_LEVEL = 0) */
@@ -525,6 +535,7 @@ page_is_leaf(
/*=========*/
const page_t* page) /*!< in: page */
__attribute__((nonnull, pure));
+#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
Determine whether the page is empty.
@return true if the page is empty (PAGE_N_RECS = 0) */
@@ -1115,8 +1126,11 @@ page_find_rec_max_not_deleted(
#define UNIV_INLINE UNIV_INLINE_ORIGINAL
#endif
+#endif /* !UNIV_INNOCHECKSUM */
+
#ifndef UNIV_NONINL
#include "page0page.ic"
#endif
+
#endif
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index 9b81156708f..99e17001c0a 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -23,6 +23,8 @@ Index page routines
Created 2/2/1994 Heikki Tuuri
*******************************************************/
+#ifndef UNIV_INNOCHECKSUM
+
#include "mach0data.h"
#ifdef UNIV_DEBUG
# include "log0recv.h"
@@ -38,6 +40,7 @@ Created 2/2/1994 Heikki Tuuri
#define UNIV_INLINE
#endif
+#endif /* !UNIV_INNOCHECKSUM */
/************************************************************//**
Gets the start of a page.
@return start of the page */
@@ -49,6 +52,7 @@ page_align(
{
return((page_t*) ut_align_down(ptr, UNIV_PAGE_SIZE));
}
+#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
Gets the offset within a page.
@return offset from the start of the page */
@@ -103,6 +107,7 @@ page_update_max_trx_id(
}
}
+#endif /* !UNIV_INNOCHECKSUM */
/*************************************************************//**
Reads the given header field. */
UNIV_INLINE
@@ -118,6 +123,7 @@ page_header_get_field(
return(mach_read_from_2(page + PAGE_HEADER + field));
}
+#ifndef UNIV_INNOCHECKSUM
/*************************************************************//**
Sets the given header field. */
UNIV_INLINE
@@ -223,6 +229,7 @@ page_header_reset_last_insert(
}
#endif /* !UNIV_HOTBACKUP */
+#endif /* !UNIV_INNOCHECKSUM */
/************************************************************//**
Determine whether the page is in new-style compact format.
@return nonzero if the page is in compact format, zero if it is in
@@ -236,6 +243,7 @@ page_is_comp(
return(page_header_get_field(page, PAGE_N_HEAP) & 0x8000);
}
+#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
TRUE if the record is on a page in compact format.
@return nonzero if in compact format */
@@ -264,6 +272,7 @@ page_rec_get_heap_no(
}
}
+#endif /* !UNIV_INNOCHECKSUM */
/************************************************************//**
Determine whether the page is a B-tree leaf.
@return true if the page is a B-tree leaf (PAGE_LEVEL = 0) */
@@ -276,6 +285,7 @@ page_is_leaf(
return(!*(const uint16*) (page + (PAGE_HEADER + PAGE_LEVEL)));
}
+#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
Determine whether the page is empty.
@return true if the page is empty (PAGE_N_RECS = 0) */
@@ -529,6 +539,7 @@ page_cmp_dtuple_rec_with_match(
}
#endif /* !UNIV_HOTBACKUP */
+#endif /* !UNIV_INNOCHECKSUM */
/*************************************************************//**
Gets the page number.
@return page number */
@@ -542,6 +553,7 @@ page_get_page_no(
return(mach_read_from_4(page + FIL_PAGE_OFFSET));
}
+#ifndef UNIV_INNOCHECKSUM
/*************************************************************//**
Gets the tablespace identifier.
@return space id */
@@ -555,6 +567,7 @@ page_get_space_id(
return(mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));
}
+#endif /* !UNIV_INNOCHECKSUM */
/*************************************************************//**
Gets the number of user records on page (infimum and supremum records
are not user records).
@@ -568,6 +581,7 @@ page_get_n_recs(
return(page_header_get_field(page, PAGE_N_RECS));
}
+#ifndef UNIV_INNOCHECKSUM
/*************************************************************//**
Gets the number of dir slots in directory.
@return number of slots */
@@ -958,6 +972,7 @@ page_rec_get_base_extra_size(
return(REC_N_NEW_EXTRA_BYTES + (ulint) !page_rec_is_comp(rec));
}
+#endif /* !UNIV_INNOCHECKSUM */
/************************************************************//**
Returns the sum of the sizes of the records in the record list, excluding
the infimum and supremum records.
@@ -981,7 +996,7 @@ page_get_data_size(
return(ret);
}
-
+#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
Allocates a block of memory from the free list of an index page. */
UNIV_INLINE
@@ -1170,6 +1185,8 @@ page_mem_free(
}
}
+#endif /* !UNIV_INNOCHECKSUM */
+
#ifdef UNIV_MATERIALIZE
#undef UNIV_INLINE
#define UNIV_INLINE UNIV_INLINE_ORIGINAL
diff --git a/storage/innobase/include/page0types.h b/storage/innobase/include/page0types.h
index 95143a4bb44..fb9250a5a3b 100644
--- a/storage/innobase/include/page0types.h
+++ b/storage/innobase/include/page0types.h
@@ -33,6 +33,8 @@ using namespace std;
#include "univ.i"
#include "dict0types.h"
#include "mtr0types.h"
+#include "sync0types.h"
+#include "os0thread.h"
/** Eliminates a name collision on HP-UX */
#define page_t ib_page_t
diff --git a/storage/innobase/include/page0zip.h b/storage/innobase/include/page0zip.h
index 9d3b78ed2fc..6fe6934e35c 100644
--- a/storage/innobase/include/page0zip.h
+++ b/storage/innobase/include/page0zip.h
@@ -1,3 +1,4 @@
+
/*****************************************************************************
Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved.
@@ -32,13 +33,15 @@ Created June 2005 by Marko Makela
# define UNIV_INLINE
#endif
-#include "mtr0types.h"
#include "page0types.h"
#include "buf0types.h"
+#ifndef UNIV_INNOCHECKSUM
+#include "mtr0types.h"
#include "dict0types.h"
#include "srv0srv.h"
#include "trx0types.h"
#include "mem0mem.h"
+#endif /* !UNIV_INNOCHECKSUM */
/* Compression level to be used by zlib. Settable by user. */
extern uint page_zip_level;
@@ -50,6 +53,7 @@ extern uint page_zip_level;
compression algorithm changes in zlib. */
extern my_bool page_zip_log_pages;
+#ifndef UNIV_INNOCHECKSUM
/**********************************************************************//**
Determine the size of a compressed page in bytes.
@return size in bytes */
@@ -112,6 +116,7 @@ page_zip_set_alloc(
/*===============*/
void* stream, /*!< in/out: zlib stream */
mem_heap_t* heap); /*!< in: memory heap to use */
+#endif /* !UNIV_INNOCHECKSUM */
/**********************************************************************//**
Compress a page.
@@ -147,6 +152,7 @@ page_zip_decompress(
after page creation */
__attribute__((nonnull(1,2)));
+#ifndef UNIV_INNOCHECKSUM
#ifdef UNIV_DEBUG
/**********************************************************************//**
Validate a compressed page descriptor.
@@ -158,6 +164,7 @@ page_zip_simple_validate(
const page_zip_des_t* page_zip); /*!< in: compressed page
descriptor */
#endif /* UNIV_DEBUG */
+#endif /* !UNIV_INNOCHECKSUM */
#ifdef UNIV_ZIP_DEBUG
/**********************************************************************//**
@@ -185,6 +192,7 @@ page_zip_validate(
__attribute__((nonnull(1,2)));
#endif /* UNIV_ZIP_DEBUG */
+#ifndef UNIV_INNOCHECKSUM
/**********************************************************************//**
Determine how big record can be inserted without recompressing the page.
@return a positive number indicating the maximum size of a record
@@ -418,6 +426,8 @@ page_zip_reorganize(
dict_index_t* index, /*!< in: index of the B-tree node */
mtr_t* mtr) /*!< in: mini-transaction */
__attribute__((nonnull));
+#endif /* !UNIV_INNOCHECKSUM */
+
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Copy the records of a page byte for byte. Do not copy the page header
@@ -474,6 +484,8 @@ page_zip_verify_checksum(
/*=====================*/
const void* data, /*!< in: compressed page */
ulint size); /*!< in: size of compressed page */
+
+#ifndef UNIV_INNOCHECKSUM
/**********************************************************************//**
Write a log record of compressing an index page without the data on the page. */
UNIV_INLINE
@@ -506,6 +518,8 @@ void
page_zip_reset_stat_per_index();
/*===========================*/
+#endif /* !UNIV_INNOCHECKSUM */
+
#ifndef UNIV_HOTBACKUP
/** Check if a pointer to an uncompressed page matches a compressed page.
When we IMPORT a tablespace the blocks and accompanying frames are allocted
@@ -531,8 +545,10 @@ from outside the buffer pool.
# define UNIV_INLINE UNIV_INLINE_ORIGINAL
#endif
+#ifndef UNIV_INNOCHECKSUM
#ifndef UNIV_NONINL
# include "page0zip.ic"
#endif
+#endif /* !UNIV_INNOCHECKSUM */
#endif /* page0zip_h */
diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h
index 238cb04e1f8..94453cc7b89 100644
--- a/storage/innobase/include/rem0rec.h
+++ b/storage/innobase/include/rem0rec.h
@@ -26,11 +26,13 @@ Created 5/30/1994 Heikki Tuuri
#ifndef rem0rec_h
#define rem0rec_h
+#ifndef UNIV_INNOCHECKSUM
#include "univ.i"
#include "data0data.h"
#include "rem0types.h"
#include "mtr0types.h"
#include "page0types.h"
+#endif /* !UNIV_INNOCHECKSUM */
/* Info bit denoting the predefined minimum record: this bit is set
if and only if the record is the first user record on a non-leaf
@@ -88,6 +90,7 @@ offsets[] array, first passed to rec_get_offsets() */
#define REC_OFFS_NORMAL_SIZE 100
#define REC_OFFS_SMALL_SIZE 10
+#ifndef UNIV_INNOCHECKSUM
/******************************************************//**
The following function is used to get the pointer of the next chained record
on the same page.
@@ -994,4 +997,5 @@ int wsrep_rec_get_foreign_key(
#include "rem0rec.ic"
#endif
+#endif /* !UNIV_INNOCHECKSUM */
#endif
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index c0dc7e7ccc6..13f87262f8b 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -365,7 +365,6 @@ extern my_bool srv_stats_sample_traditional;
extern ibool srv_use_doublewrite_buf;
extern ulong srv_doublewrite_batch_size;
-extern ulong srv_checksum_algorithm;
extern ibool srv_use_atomic_writes;
#ifdef HAVE_POSIX_FALLOCATE
diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h
index 660551961a6..45733921212 100644
--- a/storage/innobase/include/trx0undo.h
+++ b/storage/innobase/include/trx0undo.h
@@ -26,6 +26,8 @@ Created 3/26/1996 Heikki Tuuri
#ifndef trx0undo_h
#define trx0undo_h
+#ifndef UNIV_INNOCHECKSUM
+
#include "univ.i"
#include "trx0types.h"
#include "mtr0mtr.h"
@@ -385,6 +387,8 @@ trx_undo_mem_free(
/*==============*/
trx_undo_t* undo); /* in: the undo object to be freed */
+#endif /* !UNIV_INNOCHECKSUM */
+
/* Types of an undo log segment */
#define TRX_UNDO_INSERT 1 /* contains undo entries for inserts */
#define TRX_UNDO_UPDATE 2 /* contains undo entries for updates
@@ -403,6 +407,7 @@ trx_undo_mem_free(
prepared transaction */
#ifndef UNIV_HOTBACKUP
+#ifndef UNIV_INNOCHECKSUM
/** Transaction undo log memory object; this is protected by the undo_mutex
in the corresponding transaction object */
@@ -461,6 +466,7 @@ struct trx_undo_t{
/*!< undo log objects in the rollback
segment are chained into lists */
};
+#endif /* !UNIV_INNOCHECKSUM */
#endif /* !UNIV_HOTBACKUP */
/** The offset of the undo log page header on pages of the undo log */
@@ -588,8 +594,10 @@ quite a large overhead. */
with the XA XID */
/* @} */
+#ifndef UNIV_INNOCHECKSUM
#ifndef UNIV_NONINL
#include "trx0undo.ic"
#endif
+#endif /* !UNIV_INNOCHECKSUM */
#endif
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index eeeaca166a8..acfee420352 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -44,7 +44,7 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 21
+#define INNODB_VERSION_BUGFIX 22
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
@@ -105,7 +105,7 @@ if we are compiling on Windows. */
/* Include the header file generated by GNU autoconf */
# ifndef __WIN__
# ifndef UNIV_HOTBACKUP
-# include "config.h"
+# include "my_config.h"
# endif /* UNIV_HOTBACKUP */
# endif
diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h
index 7d1c3cd4f0b..939ccee6e3e 100644
--- a/storage/innobase/include/ut0ut.h
+++ b/storage/innobase/include/ut0ut.h
@@ -36,6 +36,8 @@ Created 1/20/1994 Heikki Tuuri
# include "os0sync.h" /* for HAVE_ATOMIC_BUILTINS */
#endif /* UNIV_HOTBACKUP */
+#endif /* !UNIV_INNOCHECKSUM */
+
#include <time.h>
#ifndef MYSQL_SERVER
#include <ctype.h>
@@ -64,6 +66,7 @@ private:
F& f;
};
+#ifndef UNIV_INNOCHECKSUM
#ifndef UNIV_HOTBACKUP
# if defined(HAVE_PAUSE_INSTRUCTION)
/* According to the gcc info page, asm volatile means that the
@@ -162,6 +165,7 @@ ut_pair_cmp(
ulint a2, /*!< in: less significant part of first pair */
ulint b1, /*!< in: more significant part of second pair */
ulint b2); /*!< in: less significant part of second pair */
+#endif /* !UNIV_INNOCHECKSUM */
/*************************************************************//**
Determines if a number is zero or a power of two.
@param n in: number
@@ -192,6 +196,7 @@ when m is a power of two. In other words, rounds n up to m * k.
@param m in: alignment, must be a power of two
@return n rounded up to the smallest possible integer multiple of m */
#define ut_calc_align(n, m) (((n) + ((m) - 1)) & ~((m) - 1))
+#ifndef UNIV_INNOCHECKSUM
/*************************************************************//**
Calculates fast the 2-logarithm of a number, rounded upward to an
integer.
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 97941c0e826..914b7f42f10 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -5917,6 +5917,7 @@ loop:
ulint space = lock->un_member.rec_lock.space;
ulint zip_size= fil_space_get_zip_size(space);
ulint page_no = lock->un_member.rec_lock.page_no;
+ ibool tablespace_being_deleted = FALSE;
if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
@@ -5935,12 +5936,28 @@ loop:
lock_mutex_exit();
mutex_exit(&trx_sys->mutex);
- mtr_start(&mtr);
+ DEBUG_SYNC_C("innodb_monitor_before_lock_page_read");
- buf_page_get_with_no_latch(
- space, zip_size, page_no, &mtr);
+		/* Check if the space exists or not. Only when the space
+		is valid, try to get the page. */
+ tablespace_being_deleted = fil_inc_pending_ops(space, false);
- mtr_commit(&mtr);
+ if (!tablespace_being_deleted) {
+ mtr_start(&mtr);
+
+ buf_page_get_gen(space, zip_size, page_no,
+ RW_NO_LATCH, NULL,
+ BUF_GET_POSSIBLY_FREED,
+ __FILE__, __LINE__, &mtr);
+
+ mtr_commit(&mtr);
+
+ fil_decr_pending_ops(space);
+ } else {
+ fprintf(file, "RECORD LOCKS on"
+ " non-existing space %lu\n",
+ (ulong) space);
+ }
load_page_first = FALSE;
@@ -6366,7 +6383,7 @@ lock_rec_block_validate(
/* Make sure that the tablespace is not deleted while we are
trying to access the page. */
- if (!fil_inc_pending_ops(space)) {
+ if (!fil_inc_pending_ops(space, true)) {
mtr_start(&mtr);
block = buf_page_get_gen(
space, fil_space_get_zip_size(space),
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 53794a0d773..d849b1a6329 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -3407,7 +3407,11 @@ loop:
lsn = log_sys->lsn;
- if (lsn != log_sys->last_checkpoint_lsn
+ ut_ad(srv_force_recovery != SRV_FORCE_NO_LOG_REDO
+ || lsn == log_sys->last_checkpoint_lsn + LOG_BLOCK_HDR_SIZE);
+
+ if ((srv_force_recovery != SRV_FORCE_NO_LOG_REDO
+ && lsn != log_sys->last_checkpoint_lsn)
#ifdef UNIV_LOG_ARCHIVE
|| (srv_log_archive_on
&& lsn != log_sys->archived_lsn + LOG_BLOCK_HDR_SIZE)
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index c41e24579e1..81590545abc 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -473,6 +473,8 @@ os_file_get_last_error_low(
return(OS_FILE_OPERATION_ABORTED);
} else if (err == ERROR_ACCESS_DENIED) {
return(OS_FILE_ACCESS_VIOLATION);
+ } else if (err == ERROR_BUFFER_OVERFLOW) {
+ return(OS_FILE_NAME_TOO_LONG);
} else {
return(OS_FILE_ERROR_MAX + err);
}
@@ -534,6 +536,8 @@ os_file_get_last_error_low(
return(OS_FILE_NOT_FOUND);
case EEXIST:
return(OS_FILE_ALREADY_EXISTS);
+ case ENAMETOOLONG:
+ return(OS_FILE_NAME_TOO_LONG);
case EXDEV:
case ENOTDIR:
case EISDIR:
@@ -2013,8 +2017,6 @@ os_file_close_func(
#ifdef __WIN__
BOOL ret;
- ut_a(file);
-
ret = CloseHandle(file);
if (ret) {
@@ -2052,8 +2054,6 @@ os_file_close_no_error_handling(
#ifdef __WIN__
BOOL ret;
- ut_a(file);
-
ret = CloseHandle(file);
if (ret) {
@@ -2281,8 +2281,6 @@ os_file_flush_func(
#ifdef __WIN__
BOOL ret;
- ut_a(file);
-
os_n_fsyncs++;
ret = FlushFileBuffers(file);
@@ -2614,7 +2612,6 @@ os_file_read_func(
os_bytes_read_since_printout += n;
try_again:
- ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
@@ -2741,7 +2738,6 @@ os_file_read_no_error_handling_func(
os_bytes_read_since_printout += n;
try_again:
- ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
@@ -2877,7 +2873,6 @@ os_file_write_func(
os_n_file_writes++;
- ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
retry:
@@ -3070,7 +3065,7 @@ os_file_status(
struct _stat64 statinfo;
ret = _stat64(path, &statinfo);
- if (ret && (errno == ENOENT || errno == ENOTDIR)) {
+ if (ret && (errno == ENOENT || errno == ENOTDIR || errno == ENAMETOOLONG)) {
/* file does not exist */
*exists = FALSE;
return(TRUE);
@@ -3098,7 +3093,7 @@ os_file_status(
struct stat statinfo;
ret = stat(path, &statinfo);
- if (ret && (errno == ENOENT || errno == ENOTDIR)) {
+ if (ret && (errno == ENOENT || errno == ENOTDIR || errno == ENAMETOOLONG)) {
/* file does not exist */
*exists = FALSE;
return(TRUE);
@@ -4598,7 +4593,6 @@ os_aio_func(
#endif /* WIN_ASYNC_IO */
ulint wake_later;
- ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
ut_ad(n % OS_FILE_LOG_BLOCK_SIZE == 0);
@@ -5476,12 +5470,12 @@ consecutive_loop:
aio_slot->offset, total_len);
}
- DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
- os_has_said_disk_full = FALSE;);
- DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
- ret = 0;);
- DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
+ if (aio_slot->type == OS_FILE_WRITE) {
+ DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
+ os_has_said_disk_full = FALSE;
+ ret = 0;
errno = 28;);
+ }
srv_set_io_thread_op_info(global_segment, "file i/o done");
diff --git a/storage/innobase/os/os0thread.cc b/storage/innobase/os/os0thread.cc
index 6736df46a87..a5b0f7de6ae 100644
--- a/storage/innobase/os/os0thread.cc
+++ b/storage/innobase/os/os0thread.cc
@@ -114,6 +114,9 @@ os_thread_create_func(
os_thread_id_t* thread_id) /*!< out: id of the created
thread, or NULL */
{
+	/* Make sure the new thread sees the changes made up to this point. */
+ os_wmb;
+
#ifdef __WIN__
os_thread_t thread;
DWORD win_thread_id;
@@ -132,10 +135,8 @@ os_thread_create_func(
if (thread_id) {
*thread_id = win_thread_id;
}
- if (thread) {
- CloseHandle(thread);
- }
- return((os_thread_t)win_thread_id);
+
+ return((os_thread_t)thread);
#else
int ret;
os_thread_t pthread;
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index 6989953cb0c..677a2ba4f70 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -36,6 +36,10 @@ using namespace std;
# include "page0zip.ic"
#endif
#undef THIS_MODULE
+#include "fil0fil.h"
+#include "buf0checksum.h"
+#include "mach0data.h"
+#ifndef UNIV_INNOCHECKSUM
#include "page0page.h"
#include "mtr0log.h"
#include "ut0sort.h"
@@ -43,15 +47,18 @@ using namespace std;
#include "btr0cur.h"
#include "page0types.h"
#include "log0recv.h"
+#endif /* !UNIV_INNOCHECKSUM */
#include "zlib.h"
#ifndef UNIV_HOTBACKUP
+#ifndef UNIV_INNOCHECKSUM
# include "buf0buf.h"
-# include "buf0lru.h"
# include "btr0sea.h"
# include "dict0boot.h"
# include "lock0lock.h"
-# include "srv0mon.h"
# include "srv0srv.h"
+#endif /* !UNIV_INNOCHECKSUM */
+# include "buf0lru.h"
+# include "srv0mon.h"
# include "ut0crc32.h"
#else /* !UNIV_HOTBACKUP */
# include "buf0checksum.h"
@@ -60,6 +67,7 @@ using namespace std;
#endif /* !UNIV_HOTBACKUP */
#ifndef UNIV_HOTBACKUP
+#ifndef UNIV_INNOCHECKSUM
/** Statistics on compression, indexed by page_zip_des_t::ssize - 1 */
UNIV_INTERN page_zip_stat_t page_zip_stat[PAGE_ZIP_SSIZE_MAX];
/** Statistics on compression, indexed by index->id */
@@ -69,6 +77,7 @@ UNIV_INTERN ib_mutex_t page_zip_stat_per_index_mutex;
#ifdef HAVE_PSI_INTERFACE
UNIV_INTERN mysql_pfs_key_t page_zip_stat_per_index_mutex_key;
#endif /* HAVE_PSI_INTERFACE */
+#endif /* !UNIV_INNOCHECKSUM */
#endif /* !UNIV_HOTBACKUP */
/* Compression level to be used by zlib. Settable by user. */
@@ -117,6 +126,7 @@ Compare at most sizeof(field_ref_zero) bytes.
/* Enable some extra debugging output. This code can be enabled
independently of any UNIV_ debugging conditions. */
+#ifndef UNIV_INNOCHECKSUM
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
# include <stdarg.h>
__attribute__((format (printf, 1, 2)))
@@ -149,7 +159,9 @@ page_zip_fail_func(
@param fmt_args ignored: printf(3) format string and arguments */
# define page_zip_fail(fmt_args) /* empty */
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
+#endif /* !UNIV_INNOCHECKSUM */
+#ifndef UNIV_INNOCHECKSUM
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Determine the guaranteed free space on an empty page.
@@ -4838,6 +4850,7 @@ corrupt:
return(ptr + 8 + size + trailer_size);
}
+#endif /* !UNIV_INNOCHECKSUM */
/**********************************************************************//**
Calculate the compressed page checksum.
@@ -4913,8 +4926,19 @@ page_zip_verify_checksum(
stored = static_cast<ib_uint32_t>(mach_read_from_4(
static_cast<const unsigned char*>(data) + FIL_PAGE_SPACE_OR_CHKSUM));
- /* declare empty pages non-corrupted */
- if (stored == 0) {
+#if FIL_PAGE_LSN % 8
+#error "FIL_PAGE_LSN must be 64 bit aligned"
+#endif
+
+#ifndef UNIV_INNOCHECKSUM
+ /* innochecksum doesn't compile with ut_d. Since we don't
+ need to check for empty pages when running innochecksum,
+ just don't include this code. */
+ /* Check if page is empty */
+ if (stored == 0
+ && *reinterpret_cast<const ib_uint64_t*>(static_cast<const char*>(
+ data)
+ + FIL_PAGE_LSN) == 0) {
/* make sure that the page is really empty */
ulint i;
for (i = 0; i < size; i++) {
@@ -4922,9 +4946,10 @@ page_zip_verify_checksum(
return(FALSE);
}
}
-
+ /* Empty page */
return(TRUE);
}
+#endif
calc = static_cast<ib_uint32_t>(page_zip_calc_checksum(
data, size, static_cast<srv_checksum_algorithm_t>(
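
The stricter empty-page test above only accepts a compressed page without a checksum match when both the stored checksum field and the 64-bit FIL_PAGE_LSN are zero, and it still scans every byte afterwards. A stand-alone sketch of the same check (not part of the patch; it uses the conventional offsets FIL_PAGE_SPACE_OR_CHKSUM = 0 and FIL_PAGE_LSN = 16, and ignores byte order, which is irrelevant for a zero test):

#include <cstring>
#include <cstdint>

/* Sketch only: mirrors the empty-page shortcut in page_zip_verify_checksum(). */
static bool page_is_really_empty(const unsigned char* data, size_t size)
{
	uint32_t	stored;
	uint64_t	lsn;

	memcpy(&stored, data, 4);	/* FIL_PAGE_SPACE_OR_CHKSUM */
	memcpy(&lsn, data + 16, 8);	/* FIL_PAGE_LSN */

	if (stored != 0 || lsn != 0) {
		return false;
	}

	for (size_t i = 0; i < size; i++) {
		if (data[i] != 0) {	/* make sure the page is really empty */
			return false;
		}
	}

	return true;
}
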
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 1138aa410cc..2717d39b4c0 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -3792,6 +3792,10 @@ row_drop_table_for_mysql(
pars_info_t* info = NULL;
mem_heap_t* heap = NULL;
+ DBUG_ENTER("row_drop_table_for_mysql");
+
+ DBUG_PRINT("row_drop_table_for_mysql", ("table: %s", name));
+
ut_a(name != NULL);
if (srv_created_new_raw) {
@@ -3801,7 +3805,7 @@ row_drop_table_for_mysql(
"InnoDB: Shut down mysqld and edit my.cnf so that newraw"
" is replaced with raw.\n", stderr);
- return(DB_ERROR);
+ DBUG_RETURN(DB_ERROR);
}
/* The table name is prefixed with the database name and a '/'.
@@ -4429,7 +4433,7 @@ funct_exit:
srv_wake_master_thread();
- return(err);
+ DBUG_RETURN(err);
}
/*********************************************************************//**
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index d1be5be9238..c56f90ff0a8 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -131,6 +131,17 @@ static ulint n[SRV_MAX_N_IO_THREADS + 6];
/** io_handler_thread identifiers, 32 is the maximum number of purge threads */
static os_thread_id_t thread_ids[SRV_MAX_N_IO_THREADS + 6 + 32];
+/** Thread handles */
+static os_thread_t thread_handles[SRV_MAX_N_IO_THREADS + 6 + 32];
+static os_thread_t buf_flush_page_cleaner_thread_handle;
+static os_thread_t buf_dump_thread_handle;
+static os_thread_t dict_stats_thread_handle;
+/** Status variables: is the thread started? */
+static bool thread_started[SRV_MAX_N_IO_THREADS + 6 + 32] = {false};
+static bool buf_flush_page_cleaner_thread_started = false;
+static bool buf_dump_thread_started = false;
+static bool dict_stats_thread_started = false;
+
/** We use this mutex to test the return value of pthread_mutex_trylock
on successful locking. HP-UX does NOT return 0, though Linux et al do. */
static os_fast_mutex_t srv_os_test_mutex;
@@ -1983,7 +1994,8 @@ innobase_start_or_create_for_mysql(void)
n[i] = i;
- os_thread_create(io_handler_thread, n + i, thread_ids + i);
+ thread_handles[i] = os_thread_create(io_handler_thread, n + i, thread_ids + i);
+ thread_started[i] = true;
}
#ifdef UNIV_LOG_ARCHIVE
@@ -2647,19 +2659,22 @@ files_checked:
if (!srv_read_only_mode) {
/* Create the thread which watches the timeouts
for lock waits */
- os_thread_create(
+ thread_handles[2 + SRV_MAX_N_IO_THREADS] = os_thread_create(
lock_wait_timeout_thread,
NULL, thread_ids + 2 + SRV_MAX_N_IO_THREADS);
+ thread_started[2 + SRV_MAX_N_IO_THREADS] = true;
/* Create the thread which warns of long semaphore waits */
- os_thread_create(
+ thread_handles[3 + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_error_monitor_thread,
NULL, thread_ids + 3 + SRV_MAX_N_IO_THREADS);
+ thread_started[3 + SRV_MAX_N_IO_THREADS] = true;
/* Create the thread which prints InnoDB monitor info */
- os_thread_create(
+ thread_handles[4 + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_monitor_thread,
NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS);
+ thread_started[4 + SRV_MAX_N_IO_THREADS] = true;
}
/* Create the SYS_FOREIGN and SYS_FOREIGN_COLS system tables */
@@ -2686,26 +2701,30 @@ files_checked:
if (!srv_read_only_mode) {
- os_thread_create(
+ thread_handles[1 + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_master_thread,
NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS));
+ thread_started[1 + SRV_MAX_N_IO_THREADS] = true;
}
if (!srv_read_only_mode
&& srv_force_recovery < SRV_FORCE_NO_BACKGROUND) {
- os_thread_create(
+ thread_handles[5 + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_purge_coordinator_thread,
NULL, thread_ids + 5 + SRV_MAX_N_IO_THREADS);
+ thread_started[5 + SRV_MAX_N_IO_THREADS] = true;
+
ut_a(UT_ARR_SIZE(thread_ids)
> 5 + srv_n_purge_threads + SRV_MAX_N_IO_THREADS);
/* We've already created the purge coordinator thread above. */
for (i = 1; i < srv_n_purge_threads; ++i) {
- os_thread_create(
+ thread_handles[5 + i + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_worker_thread, NULL,
thread_ids + 5 + i + SRV_MAX_N_IO_THREADS);
+ thread_started[5 + i + SRV_MAX_N_IO_THREADS] = true;
}
srv_start_wait_for_purge_to_start();
@@ -2715,7 +2734,8 @@ files_checked:
}
if (!srv_read_only_mode) {
- os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL);
+ buf_flush_page_cleaner_thread_handle = os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL);
+ buf_flush_page_cleaner_thread_started = true;
}
#ifdef UNIV_DEBUG
@@ -2860,10 +2880,12 @@ files_checked:
if (!srv_read_only_mode) {
/* Create the buffer pool dump/load thread */
- os_thread_create(buf_dump_thread, NULL, NULL);
+ buf_dump_thread_handle = os_thread_create(buf_dump_thread, NULL, NULL);
+ buf_dump_thread_started = true;
/* Create the dict stats gathering thread */
- os_thread_create(dict_stats_thread, NULL, NULL);
+ dict_stats_thread_handle = os_thread_create(dict_stats_thread, NULL, NULL);
+ dict_stats_thread_started = true;
/* Create the thread that will optimize the FTS sub-system. */
fts_optimize_init();
@@ -3032,6 +3054,34 @@ innobase_shutdown_for_mysql(void)
dict_stats_thread_deinit();
}
+#ifdef __WIN__
+ /* MDEV-361: ha_innodb.dll leaks handles on Windows
+ MDEV-7403: should not pass recv_writer_thread_handle to
+ CloseHandle().
+
+ On Windows we should call CloseHandle() for all
+ open thread handles. */
+ if (os_thread_count == 0) {
+ for (i = 0; i < SRV_MAX_N_IO_THREADS + 6 + 32; ++i) {
+ if (thread_started[i]) {
+ CloseHandle(thread_handles[i]);
+ }
+ }
+
+ if (buf_flush_page_cleaner_thread_started) {
+ CloseHandle(buf_flush_page_cleaner_thread_handle);
+ }
+
+ if (buf_dump_thread_started) {
+ CloseHandle(buf_dump_thread_handle);
+ }
+
+ if (dict_stats_thread_started) {
+ CloseHandle(dict_stats_thread_handle);
+ }
+ }
+#endif /* __WIN__ */
+
/* This must be disabled before closing the buffer pool
and closing the data dictionary. */
btr_search_disable();
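
For context on the CloseHandle() loop added above: on Windows a thread handle returned by CreateThread() stays open until CloseHandle() is called on it, even after the thread itself has exited, so never closing it leaks one kernel handle per started thread (the ha_innodb.dll leak tracked as MDEV-361). A minimal stand-alone illustration of the pattern (not part of the patch):

#include <windows.h>

static DWORD WINAPI worker(LPVOID)
{
	return 0;
}

int main()
{
	DWORD	id;
	HANDLE	h = CreateThread(NULL, 0, worker, NULL, 0, &id);

	if (h != NULL) {
		WaitForSingleObject(h, INFINITE);	/* let the thread finish */
		CloseHandle(h);				/* without this the handle leaks */
	}

	return 0;
}
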
diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc
index 8a211d81af5..4ff330791a0 100644
--- a/storage/innobase/sync/sync0rw.cc
+++ b/storage/innobase/sync/sync0rw.cc
@@ -286,6 +286,7 @@ rw_lock_free_func(
ib_mutex_t* mutex;
#endif /* !INNODB_RW_LOCKS_USE_ATOMICS */
+ os_rmb;
ut_ad(rw_lock_validate(lock));
ut_a(lock->lock_word == X_LOCK_DECR);
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index e80ce15165b..14ab6ec7599 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -1569,6 +1569,7 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
MARIA_SHARE *share= file->s;
ha_rows rows= file->state->records;
TRN *old_trn= file->trn;
+ my_bool locking= 0;
DBUG_ENTER("ha_maria::repair");
/*
@@ -1605,12 +1606,18 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
share->state.dupp_key= MI_MAX_KEY;
strmov(fixed_name, share->open_file_name.str);
- // Don't lock tables if we have used LOCK TABLE
- if (!thd->locked_tables_mode &&
- maria_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK))
+ /*
+ Don't lock tables if we have used LOCK TABLE or if we come from
+ enable_index()
+ */
+ if (!thd->locked_tables_mode && ! (param->testflag & T_NO_LOCKS))
{
- _ma_check_print_error(param, ER(ER_CANT_LOCK), my_errno);
- DBUG_RETURN(HA_ADMIN_FAILED);
+ locking= 1;
+ if (maria_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK))
+ {
+ _ma_check_print_error(param, ER(ER_CANT_LOCK), my_errno);
+ DBUG_RETURN(HA_ADMIN_FAILED);
+ }
}
if (!do_optimize ||
@@ -1746,7 +1753,7 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
mysql_mutex_unlock(&share->intern_lock);
thd_proc_info(thd, old_proc_info);
thd_progress_end(thd); // Mark done
- if (!thd->locked_tables_mode)
+ if (locking)
maria_lock_database(file, F_UNLCK);
/* Reset trn, that may have been set by repair */
@@ -1980,8 +1987,16 @@ int ha_maria::enable_indexes(uint mode)
param.op_name= "recreating_index";
param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
T_CREATE_MISSING_KEYS | T_SAFE_REPAIR);
+ /*
+ Don't lock and unlock table if it's locked.
+ Normally table should be locked. This test is mostly for safety.
+ */
+ if (likely(file->lock_type != F_UNLCK))
+ param.testflag|= T_NO_LOCKS;
+
if (file->create_unique_index_by_sort)
param.testflag|= T_CREATE_UNIQUE_BY_SORT;
+
if (bulk_insert_single_undo == BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR)
{
bulk_insert_single_undo= BULK_INSERT_SINGLE_UNDO_AND_REPAIR;
@@ -2225,7 +2240,7 @@ bool ha_maria::check_and_repair(THD *thd)
{
/* Remove error about crashed table */
thd->get_stmt_da()->clear_warning_info(thd->query_id);
- push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_CRASHED_ON_USAGE,
"Zerofilling moved table %s", table->s->path.str);
sql_print_information("Zerofilling moved table: '%s'",
diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c
index bb085bbdc7a..ac66fdf6c57 100644
--- a/storage/maria/ma_pagecache.c
+++ b/storage/maria/ma_pagecache.c
@@ -1314,7 +1314,7 @@ static void link_block(PAGECACHE *pagecache, PAGECACHE_BLOCK_LINK *block,
struct st_my_thread_var *first_thread= last_thread->next;
struct st_my_thread_var *next_thread= first_thread;
PAGECACHE_HASH_LINK *hash_link=
- (PAGECACHE_HASH_LINK *) first_thread->opt_info;
+ (PAGECACHE_HASH_LINK *) first_thread->keycache_link;
struct st_my_thread_var *thread;
DBUG_ASSERT(block->requests + block->wlocks + block->rlocks +
@@ -1329,7 +1329,7 @@ static void link_block(PAGECACHE *pagecache, PAGECACHE_BLOCK_LINK *block,
We notify about the event all threads that ask
for the same page as the first thread in the queue
*/
- if ((PAGECACHE_HASH_LINK *) thread->opt_info == hash_link)
+ if ((PAGECACHE_HASH_LINK *) thread->keycache_link == hash_link)
{
DBUG_PRINT("signal", ("thread: %s %ld", thread->name, thread->id));
pagecache_pthread_cond_signal(&thread->suspend);
@@ -1642,7 +1642,7 @@ static void unlink_hash(PAGECACHE *pagecache, PAGECACHE_HASH_LINK *hash_link)
pagecache->waiting_for_hash_link.last_thread;
struct st_my_thread_var *first_thread= last_thread->next;
struct st_my_thread_var *next_thread= first_thread;
- PAGECACHE_PAGE *first_page= (PAGECACHE_PAGE *) (first_thread->opt_info);
+ PAGECACHE_PAGE *first_page= (PAGECACHE_PAGE *) (first_thread->keycache_link);
struct st_my_thread_var *thread;
hash_link->file= first_page->file;
@@ -1652,7 +1652,7 @@ static void unlink_hash(PAGECACHE *pagecache, PAGECACHE_HASH_LINK *hash_link)
{
PAGECACHE_PAGE *page;
thread= next_thread;
- page= (PAGECACHE_PAGE *) thread->opt_info;
+ page= (PAGECACHE_PAGE *) thread->keycache_link;
next_thread= thread->next;
/*
We notify about the event all threads that ask
@@ -1798,13 +1798,13 @@ restart:
PAGECACHE_PAGE page;
page.file= *file;
page.pageno= pageno;
- thread->opt_info= (void *) &page;
+ thread->keycache_link= (void *) &page;
wqueue_link_into_queue(&pagecache->waiting_for_hash_link, thread);
DBUG_PRINT("wait",
("suspend thread %s %ld", thread->name, thread->id));
pagecache_pthread_cond_wait(&thread->suspend,
&pagecache->cache_lock);
- thread->opt_info= NULL;
+ thread->keycache_link= NULL;
DBUG_PRINT("thread", ("restarting..."));
goto restart;
}
@@ -2067,7 +2067,7 @@ restart:
*/
struct st_my_thread_var *thread= my_thread_var;
- thread->opt_info= (void *) hash_link;
+ thread->keycache_link= (void *) hash_link;
wqueue_link_into_queue(&pagecache->waiting_for_block, thread);
do
{
@@ -2077,7 +2077,7 @@ restart:
&pagecache->cache_lock);
}
while (thread->next);
- thread->opt_info= NULL;
+ thread->keycache_link= NULL;
block= hash_link->block;
/* Ensure that the block is registered */
DBUG_ASSERT(block->requests >= 1);
@@ -5057,7 +5057,7 @@ static void pagecache_dump(PAGECACHE *pagecache)
do
{
thread= thread->next;
- page= (PAGECACHE_PAGE *) thread->opt_info;
+ page= (PAGECACHE_PAGE *) thread->keycache_link;
fprintf(pagecache_dump_file,
"thread: %s %ld, (file,pageno)=(%u,%lu)\n",
thread->name, thread->id,
@@ -5074,7 +5074,7 @@ static void pagecache_dump(PAGECACHE *pagecache)
do
{
thread=thread->next;
- hash_link= (PAGECACHE_HASH_LINK *) thread->opt_info;
+ hash_link= (PAGECACHE_HASH_LINK *) thread->keycache_link;
fprintf(pagecache_dump_file,
"thread: %s %u hash_link:%u (file,pageno)=(%u,%lu)\n",
thread->name, thread->id,
diff --git a/storage/myisam/ftbench/ft-test-run.sh b/storage/myisam/ftbench/ft-test-run.sh
index 17b13d73d47..08e03474dac 100755
--- a/storage/myisam/ftbench/ft-test-run.sh
+++ b/storage/myisam/ftbench/ft-test-run.sh
@@ -2,7 +2,7 @@
# Copyright (c) 2003, 2005, 2006 MySQL AB
# Use is subject to license terms
-
+#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; version 2
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index f21845bae63..2fe519da794 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -1084,6 +1084,7 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
char fixed_name[FN_REFLEN];
MYISAM_SHARE* share = file->s;
ha_rows rows= file->state->records;
+ my_bool locking= 0;
DBUG_ENTER("ha_myisam::repair");
param.db_name= table->s->db.str;
@@ -1098,12 +1099,18 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
// Release latches since this can take a long time
ha_release_temporary_latches(thd);
- // Don't lock tables if we have used LOCK TABLE
- if (! thd->locked_tables_mode &&
- mi_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK))
+ /*
+ Don't lock tables if we have used LOCK TABLE or if we come from
+    enable_indexes()
+ */
+ if (!thd->locked_tables_mode && ! (param.testflag & T_NO_LOCKS))
{
- mi_check_print_error(&param,ER(ER_CANT_LOCK),my_errno);
- DBUG_RETURN(HA_ADMIN_FAILED);
+ locking= 1;
+ if (mi_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK))
+ {
+ mi_check_print_error(&param,ER(ER_CANT_LOCK),my_errno);
+ DBUG_RETURN(HA_ADMIN_FAILED);
+ }
}
if (!do_optimize ||
@@ -1226,7 +1233,7 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
update_state_info(&param, file, 0);
}
thd_proc_info(thd, old_proc_info);
- if (! thd->locked_tables_mode)
+ if (locking)
mi_lock_database(file,F_UNLCK);
DBUG_RETURN(error ? HA_ADMIN_FAILED :
!optimize_done ? HA_ADMIN_ALREADY_DONE : HA_ADMIN_OK);
@@ -1458,8 +1465,16 @@ int ha_myisam::enable_indexes(uint mode)
param.op_name= "recreating_index";
param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
T_CREATE_MISSING_KEYS);
+ /*
+    Don't lock and unlock the table if it's already locked.
+    Normally the table should be locked. This test is mostly for safety.
+ */
+ if (likely(file->lock_type != F_UNLCK))
+ param.testflag|= T_NO_LOCKS;
+
if (file->create_unique_index_by_sort)
param.testflag|= T_CREATE_UNIQUE_BY_SORT;
+
param.myf_rw&= ~MY_WAIT_IF_FULL;
param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
param.stats_method= (enum_handler_stats_method)THDVAR(thd, stats_method);
@@ -1539,7 +1554,7 @@ int ha_myisam::indexes_are_disabled(void)
void ha_myisam::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_myisam::start_bulk_insert");
- THD *thd= current_thd;
+ THD *thd= table->in_use;
ulong size= MY_MIN(thd->variables.read_buff_size,
(ulong) (table->s->avg_row_length*rows));
DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
@@ -1613,7 +1628,7 @@ int ha_myisam::end_bulk_insert()
*/
if (((err= enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE)) != 0) &&
- current_thd->killed)
+ table->in_use->killed)
{
delete_all_rows();
/* not crashed, despite being killed during repair */
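The repair() hunk above separates "who took the lock" from "who holds it": a local locking flag is set only when repair() itself acquires the table lock, and enable_indexes() passes T_NO_LOCKS so repair() skips locking a table that is already locked. A minimal sketch of that guard pattern, with placeholder names standing in for mi_lock_database() and the MyISAM structures:

```cpp
// Sketch only: "unlock only what you locked". FakeTable, fake_lock() and
// fake_unlock() are hypothetical stand-ins for MI_INFO and mi_lock_database().
struct FakeTable { bool externally_locked; };

static int  fake_lock(FakeTable *)   { return 0; }  // placeholder for mi_lock_database(..., F_WRLCK)
static void fake_unlock(FakeTable *) {}             // placeholder for mi_lock_database(..., F_UNLCK)

static int repair_sketch(FakeTable *t)
{
  bool locking= false;              // remembers whether *we* took the lock
  if (!t->externally_locked)        // e.g. LOCK TABLES or enable_indexes() already holds it
  {
    locking= true;
    if (fake_lock(t))
      return 1;                     // HA_ADMIN_FAILED in the real code
  }
  /* ... repair work ... */
  if (locking)                      // never release a lock the caller owns
    fake_unlock(t);
  return 0;
}
```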
diff --git a/storage/myisammrg/mysql-test/storage_engine/alter_table_online.rdiff b/storage/myisammrg/mysql-test/storage_engine/alter_table_online.rdiff
index c21211560e9..854a00cfd81 100644
--- a/storage/myisammrg/mysql-test/storage_engine/alter_table_online.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/alter_table_online.rdiff
@@ -1,44 +1,82 @@
---- suite/storage_engine/alter_table_online.result 2014-09-25 12:15:42.000000000 +0400
-+++ suite/storage_engine/alter_table_online.reject 2014-11-17 20:25:16.000000000 +0400
-@@ -9,20 +9,35 @@
- CREATE TEMPORARY TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
- INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c');
+--- suite/storage_engine/alter_table_online.result 2014-11-12 05:27:00.000000000 +0400
++++ suite/storage_engine/alter_table_online.reject 2014-12-05 20:42:25.000000000 +0400
+@@ -2,8 +2,35 @@
+ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>, c <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b,c) VALUES (1,100,'a'),(2,200,'b'),(3,300,'c');
ALTER ONLINE TABLE t1 MODIFY b <INT_COLUMN> DEFAULT 5;
--ERROR HY000: Can't execute the given 'ALTER' command as online
-+# ERROR: Statement succeeded (expected results: ER_CANT_DO_ONLINE)
++ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
++# ERROR: Statement ended with errno 1845, errname ER_ALTER_OPERATION_NOT_SUPPORTED (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_ALTER_OPERATION_NOT_SUPPORTED.
++# Functionality or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ ALTER ONLINE TABLE t1 CHANGE b new_name <INT_COLUMN>;
++ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
++# ERROR: Statement ended with errno 1845, errname ER_ALTER_OPERATION_NOT_SUPPORTED (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_ALTER_OPERATION_NOT_SUPPORTED.
++# Functionality or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
+ ALTER ONLINE TABLE t1 COMMENT 'new comment';
++ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
++# ERROR: Statement ended with errno 1845, errname ER_ALTER_OPERATION_NOT_SUPPORTED (expected to succeed)
+# ------------ UNEXPECTED RESULT ------------
-+# The statement|command succeeded unexpectedly.
++# The statement|command finished with ER_ALTER_OPERATION_NOT_SUPPORTED.
+# Functionality or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
+# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
+# Also, this problem may cause a chain effect (more errors of different kinds in the test).
+# -------------------------------------------
+ ALTER ONLINE TABLE t1 RENAME TO t2;
+ ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
+ DROP TABLE IF EXISTS t2;
+@@ -12,10 +39,6 @@
+ CREATE TEMPORARY TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c');
+ ALTER ONLINE TABLE t1 MODIFY b <INT_COLUMN> DEFAULT 5;
+-Warnings:
+-Warning 1366 Incorrect integer value: 'a' for column 'b' at row 1
+-Warning 1366 Incorrect integer value: 'b' for column 'b' at row 2
+-Warning 1366 Incorrect integer value: 'c' for column 'b' at row 3
ALTER ONLINE TABLE t1 CHANGE b new_name <INT_COLUMN>;
--ERROR HY000: Can't execute the given 'ALTER' command as online
-+# ERROR: Statement succeeded (expected results: ER_CANT_DO_ONLINE)
ALTER ONLINE TABLE t1 COMMENT 'new comment';
--ERROR HY000: Can't execute the given 'ALTER' command as online
-+# ERROR: Statement succeeded (expected results: ER_CANT_DO_ONLINE)
ALTER ONLINE TABLE t1 RENAME TO t2;
--ERROR HY000: Can't execute the given 'ALTER' command as online
-+# ERROR: Statement succeeded (expected results: ER_CANT_DO_ONLINE)
- DROP TABLE t1;
-+ERROR 42S02: Unknown table 't1'
+@@ -23,12 +46,30 @@
CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>, c <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
INSERT INTO t1 (a,b,c) VALUES (1,100,'a'),(2,200,'b'),(3,300,'c');
ALTER ONLINE TABLE t1 DROP COLUMN b, ADD b <INT_COLUMN>;
--ERROR HY000: Can't execute the given 'ALTER' command as online
-+# ERROR: Statement succeeded (expected results: ER_CANT_DO_ONLINE)
++ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
++# ERROR: Statement ended with errno 1845, errname ER_ALTER_OPERATION_NOT_SUPPORTED (expected to succeed)
+# ------------ UNEXPECTED RESULT ------------
-+# The statement|command succeeded unexpectedly.
++# The statement|command finished with ER_ALTER_OPERATION_NOT_SUPPORTED.
+# Functionality or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
+# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
+# Also, this problem may cause a chain effect (more errors of different kinds in the test).
+# -------------------------------------------
ALTER ONLINE TABLE t1 MODIFY b BIGINT <CUSTOM_COL_OPTIONS>;
--ERROR HY000: Can't execute the given 'ALTER' command as online
-+# ERROR: Statement succeeded (expected results: ER_CANT_DO_ONLINE)
+-ERROR 0A000: LOCK=NONE is not supported. Reason: Cannot change column type INPLACE. Try LOCK=SHARED.
++ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
++# ERROR: Statement ended with errno 1845, errname ER_ALTER_OPERATION_NOT_SUPPORTED (expected results: ER_ALTER_OPERATION_NOT_SUPPORTED_REASON)
ALTER ONLINE TABLE t1 ENGINE=MEMORY;
- ERROR HY000: Can't execute the given 'ALTER' command as online
+ ERROR 0A000: LOCK=NONE is not supported. Reason: COPY algorithm requires a lock. Try LOCK=SHARED.
+ DROP TABLE t1;
+ CREATE TABLE t1 (a <INT_COLUMN>, b <INT_COLUMN>, c <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>;
+ ALTER ONLINE TABLE t1 ADD INDEX (b);
+-ALTER ONLINE TABLE t1 DROP INDEX b;
++ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
++# ERROR: Statement ended with errno 1845, errname ER_ALTER_OPERATION_NOT_SUPPORTED (expected to succeed)
++# ------------ UNEXPECTED RESULT ------------
++# The statement|command finished with ER_ALTER_OPERATION_NOT_SUPPORTED.
++# Adding an index or ALTER ONLINE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors.
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def.
++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped.
++# Also, this problem may cause a chain effect (more errors of different kinds in the test).
++# -------------------------------------------
DROP TABLE t1;
diff --git a/storage/oqgraph/CMakeLists.txt b/storage/oqgraph/CMakeLists.txt
index 151082469a9..1a59ae0f0dc 100644
--- a/storage/oqgraph/CMakeLists.txt
+++ b/storage/oqgraph/CMakeLists.txt
@@ -1,7 +1,7 @@
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
MESSAGE(STATUS "Configuring OQGraph")
-FIND_PACKAGE(Boost)
+FIND_PACKAGE(Boost 1.40.0)
IF(NOT Boost_FOUND)
MESSAGE(STATUS "Boost not found. OQGraph will not be compiled")
RETURN()
diff --git a/storage/perfschema/gen_pfs_lex_token.cc b/storage/perfschema/gen_pfs_lex_token.cc
index 7581255b284..c67f2920b75 100644
--- a/storage/perfschema/gen_pfs_lex_token.cc
+++ b/storage/perfschema/gen_pfs_lex_token.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -36,10 +36,13 @@
See also YYMAXUTOK.
*/
#define MY_MAX_TOKEN 1000
+/** Generated token. */
struct gen_lex_token_string
{
const char *m_token_string;
int m_token_length;
+ bool m_append_space;
+ bool m_start_expr;
};
gen_lex_token_string compiled_token_array[MY_MAX_TOKEN];
@@ -76,6 +79,13 @@ void set_token(int tok, const char *str)
compiled_token_array[tok].m_token_string= str;
compiled_token_array[tok].m_token_length= strlen(str);
+ compiled_token_array[tok].m_append_space= true;
+ compiled_token_array[tok].m_start_expr= false;
+}
+
+void set_start_expr_token(int tok)
+{
+ compiled_token_array[tok].m_start_expr= true;
}
void compute_tokens()
@@ -91,6 +101,8 @@ void compute_tokens()
{
compiled_token_array[tok].m_token_string= "(unknown)";
compiled_token_array[tok].m_token_length= 9;
+ compiled_token_array[tok].m_append_space= true;
+ compiled_token_array[tok].m_start_expr= false;
}
/*
@@ -102,6 +114,7 @@ void compute_tokens()
str[0]= (char) tok;
compiled_token_array[tok].m_token_string= str;
compiled_token_array[tok].m_token_length= 1;
+ compiled_token_array[tok].m_append_space= true;
}
max_token_seen= 255;
@@ -202,6 +215,71 @@ void compute_tokens()
max_token_seen++;
tok_pfs_unused= max_token_seen;
set_token(tok_pfs_unused, "UNUSED");
+
+ /*
+ Fix whitespace for some special tokens.
+ */
+
+ /*
+ The lexer parses "@@variable" as '@', '@', 'variable',
+ returning a token for '@' alone.
+
+ This is incorrect, '@' is not really a token,
+ because the syntax "@ @ variable" (with spaces) is not accepted:
+ The lexer keeps some internal state after the '@' fake token.
+
+    To work around this, the digest text is printed as "@@variable".
+ */
+ compiled_token_array[(int) '@'].m_append_space= false;
+
+ /*
+ Define additional properties for tokens.
+
+    List all the tokens that are followed by an expression.
+ This is needed to differentiate unary from binary
+ '+' and '-' operators, because we want to:
+ - reduce <unary +> <NUM> to <?>,
+ - preserve <...> <binary +> <NUM> as is.
+ */
+ set_start_expr_token('(');
+ set_start_expr_token(',');
+ set_start_expr_token(EVERY_SYM);
+ set_start_expr_token(AT_SYM);
+ set_start_expr_token(STARTS_SYM);
+ set_start_expr_token(ENDS_SYM);
+ set_start_expr_token(DEFAULT);
+ set_start_expr_token(RETURN_SYM);
+ set_start_expr_token(IF);
+ set_start_expr_token(ELSEIF_SYM);
+ set_start_expr_token(CASE_SYM);
+ set_start_expr_token(WHEN_SYM);
+ set_start_expr_token(WHILE_SYM);
+ set_start_expr_token(UNTIL_SYM);
+ set_start_expr_token(SELECT_SYM);
+
+ set_start_expr_token(OR_SYM);
+ set_start_expr_token(OR2_SYM);
+ set_start_expr_token(XOR);
+ set_start_expr_token(AND_SYM);
+ set_start_expr_token(AND_AND_SYM);
+ set_start_expr_token(NOT_SYM);
+ set_start_expr_token(BETWEEN_SYM);
+ set_start_expr_token(LIKE);
+ set_start_expr_token(REGEXP);
+
+ set_start_expr_token('|');
+ set_start_expr_token('&');
+ set_start_expr_token(SHIFT_LEFT);
+ set_start_expr_token(SHIFT_RIGHT);
+ set_start_expr_token('+');
+ set_start_expr_token('-');
+ set_start_expr_token(INTERVAL_SYM);
+ set_start_expr_token('*');
+ set_start_expr_token('/');
+ set_start_expr_token('%');
+ set_start_expr_token(DIV_SYM);
+ set_start_expr_token(MOD_SYM);
+ set_start_expr_token('^');
}
void print_tokens()
@@ -214,20 +292,26 @@ void print_tokens()
for (tok= 0; tok<256; tok++)
{
- printf("/* %03d */ { \"\\x%02x\", 1},\n", tok, tok);
+ printf("/* %03d */ { \"\\x%02x\", 1, %s, %s},\n",
+ tok,
+ tok,
+ compiled_token_array[tok].m_append_space ? "true" : "false",
+ compiled_token_array[tok].m_start_expr ? "true" : "false");
}
printf("/* PART 2: named tokens. */\n");
for (tok= 256; tok<= max_token_seen; tok++)
{
- printf("/* %03d */ { \"%s\", %d},\n",
+ printf("/* %03d */ { \"%s\", %d, %s, %s},\n",
tok,
compiled_token_array[tok].m_token_string,
- compiled_token_array[tok].m_token_length);
+ compiled_token_array[tok].m_token_length,
+ compiled_token_array[tok].m_append_space ? "true" : "false",
+ compiled_token_array[tok].m_start_expr ? "true" : "false");
}
- printf("/* DUMMY */ { \"\", 0}\n");
+ printf("/* DUMMY */ { \"\", 0, false, false}\n");
printf("};\n");
printf("/* PFS specific tokens. */\n");
@@ -254,6 +338,8 @@ int main(int argc,char **argv)
printf("{\n");
printf(" const char *m_token_string;\n");
printf(" int m_token_length;\n");
+ printf(" bool m_append_space;\n");
+ printf(" bool m_start_expr;\n");
printf("};\n");
printf("typedef struct lex_token_string lex_token_string;\n");
diff --git a/storage/perfschema/pfs_digest.cc b/storage/perfschema/pfs_digest.cc
index 473f4edce7a..c3c52bbd56d 100644
--- a/storage/perfschema/pfs_digest.cc
+++ b/storage/perfschema/pfs_digest.cc
@@ -602,16 +602,67 @@ PSI_digest_locker* pfs_digest_add_token_v1(PSI_digest_locker *locker,
switch (token)
{
- case BIN_NUM:
+ case NUM:
+ case LONG_NUM:
+ case ULONGLONG_NUM:
case DECIMAL_NUM:
case FLOAT_NUM:
+ case BIN_NUM:
case HEX_NUM:
+ {
+ bool found_unary;
+ do
+ {
+ found_unary= false;
+ peek_last_two_tokens(digest_storage, state->m_last_id_index,
+ &last_token, &last_token2);
+
+ if ((last_token == '-') || (last_token == '+'))
+ {
+ /*
+ We need to differentiate:
+ - a <unary minus> operator
+ - a <unary plus> operator
+ from
+ - a <binary minus> operator
+ - a <binary plus> operator
+ to only reduce "a = -1" to "a = ?", and not change "b - 1" to "b ?"
+
+ Binary operators are found inside an expression,
+ while unary operators are found at the beginning of an expression, or after operators.
+
+ To achieve this, every token that is followed by an <expr> expression
+ in the SQL grammar is flagged.
+ See sql/sql_yacc.yy
+ See sql/gen_lex_token.cc
+
+ For example,
+ "(-1)" is parsed as "(", "-", NUM, ")", and lex_token_array["("].m_start_expr is true,
+ so reduction of the "-" NUM is done, the result is "(?)".
+ "(a-1)" is parsed as "(", ID, "-", NUM, ")", and lex_token_array[ID].m_start_expr is false,
+ so the operator is binary, no reduction is done, and the result is "(a-?)".
+ */
+ if (lex_token_array[last_token2].m_start_expr)
+ {
+ /*
+ REDUCE:
+          TOK_PFS_GENERIC_VALUE := (UNARY_PLUS | UNARY_MINUS) (NUM | LONG_NUM | ... | FLOAT_NUM)
+
+ REDUCE:
+ TOK_PFS_GENERIC_VALUE := (UNARY_PLUS | UNARY_MINUS) TOK_PFS_GENERIC_VALUE
+ */
+ token= TOK_PFS_GENERIC_VALUE;
+ digest_storage->m_byte_count-= PFS_SIZE_OF_A_TOKEN;
+ found_unary= true;
+ }
+ }
+ } while (found_unary);
+ }
+ /* fall through, for case NULL_SYM below */
case LEX_HOSTNAME:
- case LONG_NUM:
- case NUM:
case TEXT_STRING:
case NCHAR_STRING:
- case ULONGLONG_NUM:
+ case PARAM_MARKER:
{
/*
REDUCE:
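The comment in the hunk above is the heart of this change: a '+' or '-' is folded into the following literal only when the token before the sign is flagged m_start_expr (i.e. the sign is unary), so "(-1)" digests to "(?)" while "(a-1)" keeps its binary minus and becomes "(a-?)". A small hedged sketch of that reduction over an already tokenised stream; the DemoTok names and starts_expr() table are simplified stand-ins for peek_last_two_tokens() and TOK_PFS_GENERIC_VALUE:

```cpp
#include <vector>

enum DemoTok { T_LPAREN, T_ID, T_MINUS, T_NUM, T_RPAREN,
               T_VALUE /* stands in for TOK_PFS_GENERIC_VALUE, printed as '?' */ };

// Hypothetical start-of-expression table, analogous to lex_token_array[tok].m_start_expr.
static bool starts_expr(DemoTok t) { return t == T_LPAREN || t == T_MINUS; }

// Digest a numeric literal: absorb any preceding unary signs into the
// generic value, much like the do/while loop in the switch above.
static void push_number(std::vector<DemoTok> *out)
{
  while (out->size() >= 2 && out->back() == T_MINUS &&
         starts_expr((*out)[out->size() - 2]))
    out->pop_back();                // unary sign: fold it into the value
  out->push_back(T_VALUE);
}
// "(-1)"  -> tokens '(' '-' then push_number(): '-' follows '(', unary  -> "(?)"
// "(a-1)" -> tokens '(' ID '-' then push_number(): '-' follows ID, binary -> "(a-?)"
```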
diff --git a/storage/sphinx/mysql-test/sphinx/union-5539.result b/storage/sphinx/mysql-test/sphinx/union-5539.result
index 414bcce30e9..ab694b7db6a 100644
--- a/storage/sphinx/mysql-test/sphinx/union-5539.result
+++ b/storage/sphinx/mysql-test/sphinx/union-5539.result
@@ -1,6 +1,10 @@
create table ts (id bigint unsigned not null, w int not null, query varchar(255) not null, index(query)) engine=sphinx connection="sphinx://127.0.0.1:PORT/*";
SELECT a.* FROM (SELECT * FROM ts si WHERE si.query=';mode=extended2;limit=1000000;maxmatches=500') AS a UNION SELECT b.* FROM (SELECT * FROM ts si WHERE si.query='@* 123nothingtofind123;mode=extended2;limit=1000000;maxmatches=500') AS b;
id w query
+1 1 ;mode=extended2;limit=1000000;maxmatches=500
+2 1 ;mode=extended2;limit=1000000;maxmatches=500
+3 1 ;mode=extended2;limit=1000000;maxmatches=500
+4 1 ;mode=extended2;limit=1000000;maxmatches=500
SELECT a.* FROM (SELECT * FROM ts si WHERE si.query='@* 123nothingtofind123;mode=extended2;limit=1000000;maxmatches=500') AS a UNION SELECT b.* FROM (SELECT * FROM ts si WHERE si.query=';mode=extended2;limit=1000000;maxmatches=500') AS b;
id w query
1 1 ;mode=extended2;limit=1000000;maxmatches=500
diff --git a/storage/sphinx/mysql-test/sphinx/union-5539.test b/storage/sphinx/mysql-test/sphinx/union-5539.test
index ec73be1ab3e..94cc2c024fb 100644
--- a/storage/sphinx/mysql-test/sphinx/union-5539.test
+++ b/storage/sphinx/mysql-test/sphinx/union-5539.test
@@ -5,11 +5,6 @@
eval create table ts (id bigint unsigned not null, w int not null, query varchar(255) not null, index(query)) engine=sphinx connection="sphinx://127.0.0.1:$SPHINXSEARCH_PORT/*";
let $q1=SELECT * FROM ts si WHERE si.query=';mode=extended2;limit=1000000;maxmatches=500';
let $q2=SELECT * FROM ts si WHERE si.query='@* 123nothingtofind123;mode=extended2;limit=1000000;maxmatches=500';
-########################
-# BUG BUG BUG !!!
-# Note, the result below is incorrect! It should be updated when
-# MDEV-5539 is fixed upstream!!!
-########################
eval SELECT a.* FROM ($q1) AS a UNION SELECT b.* FROM ($q2) AS b;
eval SELECT a.* FROM ($q2) AS a UNION SELECT b.* FROM ($q1) AS b;
drop table ts;
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index 5f45b0c7270..69879aea78c 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -17,8 +17,15 @@ IF(NOT LIBJEMALLOC)
MESSAGE(WARNING "TokuDB is enabled, but jemalloc is not. This configuration is not supported")
ENDIF()
+IF (HAVE_WVLA)
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-vla")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-vla")
+ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wno-vla")
+ SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Wno-vla")
+ENDIF()
+
############################################
-SET(TOKUDB_VERSION "tokudb-7.5.3")
+SET(TOKUDB_VERSION "tokudb-7.5.4")
SET(TOKUDB_DEB_FILES "usr/lib/mysql/plugin/ha_tokudb.so\netc/mysql/conf.d/tokudb.cnf\nusr/bin/tokuftdump\nusr/share/doc/mariadb-galera-server-10.0/README-TOKUDB\nusr/share/doc/mariadb-galera-server-10.0/README.md" PARENT_SCOPE)
SET(USE_BDB OFF CACHE BOOL "")
MARK_AS_ADVANCED(BUILDNAME)
@@ -65,18 +72,34 @@ include(CheckCXXCompilerFlag)
macro(set_cflags_if_supported)
foreach(flag ${ARGN})
- check_c_compiler_flag(${flag} HAVE_C_${flag})
- if (HAVE_C_${flag})
+ string(REGEX REPLACE "-" "_" temp_flag ${flag})
+ check_c_compiler_flag(${flag} HAVE_C_${temp_flag})
+ if (HAVE_C_${temp_flag})
set(CMAKE_C_FLAGS "${flag} ${CMAKE_C_FLAGS}")
endif ()
- check_cxx_compiler_flag(${flag} HAVE_CXX_${flag})
- if (HAVE_CXX_${flag})
+ check_cxx_compiler_flag(${flag} HAVE_CXX_${temp_flag})
+ if (HAVE_CXX_${temp_flag})
set(CMAKE_CXX_FLAGS "${flag} ${CMAKE_CXX_FLAGS}")
endif ()
endforeach(flag)
endmacro(set_cflags_if_supported)
+macro(append_cflags_if_supported)
+ foreach(flag ${ARGN})
+ string(REGEX REPLACE "-" "_" temp_flag ${flag})
+ check_c_compiler_flag(${flag} HAVE_C_${temp_flag})
+ if (HAVE_C_${temp_flag})
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
+ endif ()
+ check_cxx_compiler_flag(${flag} HAVE_CXX_${temp_flag})
+ if (HAVE_CXX_${temp_flag})
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
+ endif ()
+ endforeach(flag)
+endmacro(append_cflags_if_supported)
+
set_cflags_if_supported(-Wno-missing-field-initializers)
+append_cflags_if_supported(-Wno-vla)
ADD_SUBDIRECTORY(ft-index)
diff --git a/storage/tokudb/README.md b/storage/tokudb/README.md
index dc7362a5124..2ab2e21a5a1 100644
--- a/storage/tokudb/README.md
+++ b/storage/tokudb/README.md
@@ -1,11 +1,10 @@
TokuDB
======
-TokuDB is a high-performance, write optimized, transactional storage engine for MySQL and
-MariaDB. For more details, see our [product page][products].
+TokuDB is a high-performance, write optimized, transactional storage engine for MySQL, MariaDB, and Percona Server.
+For more details, see our [product page][products].
-This repository contains the MySQL plugin that uses the [TokuFT][tokuft]
-core.
+This repository contains the MySQL plugin that uses the [TokuFT][tokuft] core.
There are also patches to the MySQL and MariaDB kernels, available in our
forks of [mysql][mysql] and [mariadb][mariadb].
@@ -15,23 +14,30 @@ forks of [mysql][mysql] and [mariadb][mariadb].
[mysql]: http://github.com/Tokutek/mysql
[mariadb]: http://github.com/Tokutek/mariadb
-
-Building
+Download
--------
+* [MySQL 5.5 + TokuDB](http://www.tokutek.com/tokudb-for-mysql/download-community/)
+* [MariaDB 5.5 + TokuDB](http://www.tokutek.com/tokudb-for-mysql/download-community/)
+* [MariaDB 10.0 + TokuDB](https://downloads.mariadb.org/)
+* [Percona Server 5.6 + TokuDB](http://www.percona.com/downloads/)
+
+Build
+-----
+
The `scripts/` directory contains a script that can be used to build a
working MySQL or MariaDB with Tokutek patches, and with the TokuDB storage
engine, called `make.mysql.bash`. This script will download copies of the
needed source code from github and build everything.
-To build MySQL 5.5.39 with TokuDB 7.5.2:
+To build MySQL 5.5.40 with TokuDB 7.5.3:
```sh
-scripts/make.mysql.bash --mysqlbuild=mysql-5.5.39-tokudb-7.5.2-linux-x86_64
+scripts/make.mysql.bash --mysqlbuild=mysql-5.5.40-tokudb-7.5.3-linux-x86_64
```
-To build MariaDB 5.5.39 with TokuDB 7.5.2:
+To build MariaDB 5.5.40 with TokuDB 7.5.3:
```sh
-scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.39-tokudb-7.5.2-linux-x86_64
+scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.40-tokudb-7.5.3-linux-x86_64
```
Before you start, make sure you have a C++11-compatible compiler (GCC >=
@@ -54,10 +60,10 @@ scripts/make.mysql.debug.env.bash
```
-Contributing
-------------
+Contribute
+----------
-Please report bugs in TokuDB here on github.
+Please report TokuDB bugs at https://tokutek.atlassian.net/browse/DB.
We have two publicly accessible mailing lists:
@@ -66,7 +72,7 @@ We have two publicly accessible mailing lists:
- tokudb-dev@googlegroups.com is for discussion of the development of
TokuDB.
-We are also available on IRC on freenode.net, in the #tokutek channel.
+We are on IRC on freenode.net, in the #tokutek channel.
License
@@ -74,7 +80,7 @@ License
TokuDB is available under the GPL version 2. See [COPYING][copying]
-The TokuKV component of TokuDB is available under the GPL version 2, with
+The TokuFT component of TokuDB is available under the GPL version 2, with
slight modifications. See [README-TOKUDB][license].
[copying]: http://github.com/Tokutek/tokudb-engine/blob/master/COPYING
diff --git a/storage/tokudb/doc2/sysbench.update.ma10.tokudb754.loglog.png b/storage/tokudb/doc2/sysbench.update.ma10.tokudb754.loglog.png
new file mode 100644
index 00000000000..99b15ff642f
--- /dev/null
+++ b/storage/tokudb/doc2/sysbench.update.ma10.tokudb754.loglog.png
Binary files differ
diff --git a/storage/tokudb/doc2/sysbench.update.ma10.tokudb754.png b/storage/tokudb/doc2/sysbench.update.ma10.tokudb754.png
new file mode 100644
index 00000000000..7297013b51f
--- /dev/null
+++ b/storage/tokudb/doc2/sysbench.update.ma10.tokudb754.png
Binary files differ
diff --git a/storage/tokudb/doc2/sysbench.update.ma55.tokudb753.binlog.png b/storage/tokudb/doc2/sysbench.update.ma55.tokudb753.binlog.png
new file mode 100644
index 00000000000..510f1811e5a
--- /dev/null
+++ b/storage/tokudb/doc2/sysbench.update.ma55.tokudb753.binlog.png
Binary files differ
diff --git a/storage/tokudb/doc2/sysbench.update.ma55.tokudb753.loglog.png b/storage/tokudb/doc2/sysbench.update.ma55.tokudb753.loglog.png
new file mode 100644
index 00000000000..81fecd37684
--- /dev/null
+++ b/storage/tokudb/doc2/sysbench.update.ma55.tokudb753.loglog.png
Binary files differ
diff --git a/storage/tokudb/doc2/sysbench.update.ma55.tokudb753.png b/storage/tokudb/doc2/sysbench.update.ma55.tokudb753.png
new file mode 100644
index 00000000000..e5ed5f90b63
--- /dev/null
+++ b/storage/tokudb/doc2/sysbench.update.ma55.tokudb753.png
Binary files differ
diff --git a/storage/tokudb/doc2/sysbench.update.my55.tokudb753.loglog.png b/storage/tokudb/doc2/sysbench.update.my55.tokudb753.loglog.png
new file mode 100644
index 00000000000..1fe55b07b19
--- /dev/null
+++ b/storage/tokudb/doc2/sysbench.update.my55.tokudb753.loglog.png
Binary files differ
diff --git a/storage/tokudb/doc2/sysbench.update.ps56.tokudb754.loglog.png b/storage/tokudb/doc2/sysbench.update.ps56.tokudb754.loglog.png
new file mode 100644
index 00000000000..52985276249
--- /dev/null
+++ b/storage/tokudb/doc2/sysbench.update.ps56.tokudb754.loglog.png
Binary files differ
diff --git a/storage/tokudb/doc2/sysbench.update.ps56.tokudb754.png b/storage/tokudb/doc2/sysbench.update.ps56.tokudb754.png
new file mode 100644
index 00000000000..39b1c591730
--- /dev/null
+++ b/storage/tokudb/doc2/sysbench.update.ps56.tokudb754.png
Binary files differ
diff --git a/storage/tokudb/ft-index/buildheader/make_tdb.cc b/storage/tokudb/ft-index/buildheader/make_tdb.cc
index 9890b8ed34b..88f8882df78 100644
--- a/storage/tokudb/ft-index/buildheader/make_tdb.cc
+++ b/storage/tokudb/ft-index/buildheader/make_tdb.cc
@@ -585,6 +585,7 @@ static void print_db_txn_struct (void) {
"uint64_t (*id64) (DB_TXN*)",
"void (*set_client_id)(DB_TXN *, uint64_t client_id)",
"uint64_t (*get_client_id)(DB_TXN *)",
+ "bool (*is_prepared)(DB_TXN *)",
NULL};
sort_and_dump_fields("db_txn", false, extra);
}
diff --git a/storage/tokudb/ft-index/ft/ft-internal.h b/storage/tokudb/ft-index/ft/ft-internal.h
index 3cd39705571..88fc5dca686 100644
--- a/storage/tokudb/ft-index/ft/ft-internal.h
+++ b/storage/tokudb/ft-index/ft/ft-internal.h
@@ -616,6 +616,7 @@ typedef enum {
FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS,
FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS,
FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE,
+ FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, // how many deleted leaf entries were skipped by a cursor
FT_STATUS_NUM_ROWS
} ft_status_entry;
diff --git a/storage/tokudb/ft-index/ft/ft-ops.cc b/storage/tokudb/ft-index/ft/ft-ops.cc
index 481d80fdbe4..34c9c46f1c6 100644
--- a/storage/tokudb/ft-index/ft/ft-ops.cc
+++ b/storage/tokudb/ft-index/ft/ft-ops.cc
@@ -377,6 +377,8 @@ status_init(void)
STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, nullptr, PARCOUNT, "promotion: tried the rightmost leaf shorcut but failed (out-of-bounds)", TOKU_ENGINE_STATUS);
STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE,nullptr, PARCOUNT, "promotion: tried the rightmost leaf shorcut but failed (child reactive)", TOKU_ENGINE_STATUS);
+ STATUS_INIT(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, CURSOR_SKIP_DELETED_LEAF_ENTRY, PARCOUNT, "cursor skipped deleted leaf entries", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+
ft_status.initialized = true;
}
static void status_destroy(void) {
@@ -3378,13 +3380,13 @@ ok: ;
if (le_val_is_del(le, ftcursor->is_snapshot_read, ftcursor->ttxn)) {
// Provisionally deleted stuff is gone.
// So we need to scan in the direction to see if we can find something.
- // Every 100 deleted leaf entries check if the leaf's key is within the search bounds.
- for (uint n_deleted = 1; ; n_deleted++) {
+ // Every 64 deleted leaf entries check if the leaf's key is within the search bounds.
+ for (uint64_t n_deleted = 1; ; n_deleted++) {
switch (search->direction) {
case FT_SEARCH_LEFT:
idx++;
- if (idx >= bn->data_buffer.num_klpairs() ||
- ((n_deleted % 64) == 0 && !search_continue(search, key, keylen))) {
+ if (idx >= bn->data_buffer.num_klpairs() || ((n_deleted % 64) == 0 && !search_continue(search, key, keylen))) {
+ STATUS_INC(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, n_deleted);
if (ftcursor->interrupt_cb && ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra)) {
return TOKUDB_INTERRUPTED;
}
@@ -3393,6 +3395,7 @@ ok: ;
break;
case FT_SEARCH_RIGHT:
if (idx == 0) {
+ STATUS_INC(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, n_deleted);
if (ftcursor->interrupt_cb && ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra)) {
return TOKUDB_INTERRUPTED;
}
@@ -3406,6 +3409,7 @@ ok: ;
r = bn->data_buffer.fetch_klpair(idx, &le, &keylen, &key);
assert_zero(r); // we just validated the index
if (!le_val_is_del(le, ftcursor->is_snapshot_read, ftcursor->ttxn)) {
+ STATUS_INC(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, n_deleted);
goto got_a_good_value;
}
}
diff --git a/storage/tokudb/ft-index/locktree/lock_request.cc b/storage/tokudb/ft-index/locktree/lock_request.cc
index 97fa780bb04..18f6051afdf 100644
--- a/storage/tokudb/ft-index/locktree/lock_request.cc
+++ b/storage/tokudb/ft-index/locktree/lock_request.cc
@@ -113,12 +113,19 @@ void lock_request::create(void) {
m_complete_r = 0;
m_state = state::UNINITIALIZED;
+ m_info = nullptr;
toku_cond_init(&m_wait_cond, nullptr);
+
+ m_start_test_callback = nullptr;
+ m_retry_test_callback = nullptr;
}
// destroy a lock request.
void lock_request::destroy(void) {
+ invariant(m_state != state::PENDING);
+ invariant(m_state != state::DESTROYED);
+ m_state = state::DESTROYED;
toku_destroy_dbt(&m_left_key_copy);
toku_destroy_dbt(&m_right_key_copy);
toku_cond_destroy(&m_wait_cond);
@@ -135,7 +142,7 @@ void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT
toku_destroy_dbt(&m_right_key_copy);
m_type = lock_type;
m_state = state::INITIALIZED;
- m_info = lt->get_lock_request_info();
+ m_info = lt ? lt->get_lock_request_info() : nullptr;
m_big_txn = big_txn;
}
@@ -223,15 +230,18 @@ int lock_request::start(void) {
insert_into_lock_requests();
if (deadlock_exists(conflicts)) {
remove_from_lock_requests();
- complete(DB_LOCK_DEADLOCK);
+ r = DB_LOCK_DEADLOCK;
}
toku_mutex_unlock(&m_info->mutex);
- } else {
+ if (m_start_test_callback) m_start_test_callback(); // test callback
+ }
+
+ if (r != DB_LOCK_NOTGRANTED) {
complete(r);
}
conflicts.destroy();
- return m_state == state::COMPLETE ? m_complete_r : r;
+ return r;
}
// sleep on the lock request until it becomes resolved or the wait time has elapsed.
@@ -292,8 +302,8 @@ int lock_request::wait(uint64_t wait_time_ms, uint64_t killed_time_ms, int (*kil
// complete this lock request with the given return value
void lock_request::complete(int complete_r) {
- m_state = state::COMPLETE;
m_complete_r = complete_r;
+ m_state = state::COMPLETE;
}
const DBT *lock_request::get_left_key(void) const {
@@ -331,6 +341,7 @@ int lock_request::retry(void) {
if (r == 0) {
remove_from_lock_requests();
complete(r);
+ if (m_retry_test_callback) m_retry_test_callback(); // test callback
toku_cond_broadcast(&m_wait_cond);
}
@@ -416,7 +427,8 @@ void lock_request::remove_from_lock_requests(void) {
uint32_t idx;
lock_request *request;
int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(m_txnid, &request, &idx);
- invariant_zero(r && request == this);
+ invariant_zero(r);
+ invariant(request == this);
r = m_info->pending_lock_requests.delete_at(idx);
invariant_zero(r);
}
@@ -432,4 +444,12 @@ int lock_request::find_by_txnid(lock_request * const &request, const TXNID &txni
}
}
+void lock_request::set_start_test_callback(void (*f)(void)) {
+ m_start_test_callback = f;
+}
+
+void lock_request::set_retry_test_callback(void (*f)(void)) {
+ m_retry_test_callback = f;
+}
+
} /* namespace toku */
diff --git a/storage/tokudb/ft-index/locktree/lock_request.h b/storage/tokudb/ft-index/locktree/lock_request.h
index d1a4c2822e0..c504961fcc0 100644
--- a/storage/tokudb/ft-index/locktree/lock_request.h
+++ b/storage/tokudb/ft-index/locktree/lock_request.h
@@ -164,6 +164,8 @@ public:
// The rest remain pending.
static void retry_all_lock_requests(locktree *lt);
+ void set_start_test_callback(void (*f)(void));
+ void set_retry_test_callback(void (*f)(void));
private:
enum state {
@@ -171,6 +173,7 @@ private:
INITIALIZED,
PENDING,
COMPLETE,
+ DESTROYED,
};
// The keys for a lock request are stored "unowned" in m_left_key
@@ -236,6 +239,9 @@ private:
static int find_by_txnid(lock_request * const &request, const TXNID &txnid);
+ void (*m_start_test_callback)(void);
+ void (*m_retry_test_callback)(void);
+
friend class lock_request_unit_test;
};
ENSURE_POD(lock_request);
diff --git a/storage/tokudb/ft-index/locktree/locktree.cc b/storage/tokudb/ft-index/locktree/locktree.cc
index 27e528db8e8..fc2470e98bd 100644
--- a/storage/tokudb/ft-index/locktree/locktree.cc
+++ b/storage/tokudb/ft-index/locktree/locktree.cc
@@ -152,6 +152,7 @@ void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, const compar
void locktree::destroy(void) {
invariant(m_reference_count == 0);
+ invariant(m_lock_request_info.pending_lock_requests.size() == 0);
m_cmp.destroy();
m_rangetree->destroy();
toku_free(m_rangetree);
diff --git a/storage/tokudb/ft-index/locktree/tests/lock_request_start_retry_race.cc b/storage/tokudb/ft-index/locktree/tests/lock_request_start_retry_race.cc
new file mode 100644
index 00000000000..86ef2dd9cc5
--- /dev/null
+++ b/storage/tokudb/ft-index/locktree/tests/lock_request_start_retry_race.cc
@@ -0,0 +1,193 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*
+COPYING CONDITIONS NOTICE:
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation, and provided that the
+ following conditions are met:
+
+ * Redistributions of source code must retain this COPYING
+ CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
+ DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
+ PATENT MARKING NOTICE (below), and the PATENT RIGHTS
+ GRANT (below).
+
+ * Redistributions in binary form must reproduce this COPYING
+ CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
+ DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
+ PATENT MARKING NOTICE (below), and the PATENT RIGHTS
+ GRANT (below) in the documentation and/or other materials
+ provided with the distribution.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+COPYRIGHT NOTICE:
+
+ TokuFT, Tokutek Fractal Tree Indexing Library.
+ Copyright (C) 2007-2013 Tokutek, Inc.
+
+DISCLAIMER:
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+UNIVERSITY PATENT NOTICE:
+
+ The technology is licensed by the Massachusetts Institute of
+ Technology, Rutgers State University of New Jersey, and the Research
+ Foundation of State University of New York at Stony Brook under
+ United States of America Serial No. 11/760379 and to the patents
+ and/or patent applications resulting from it.
+
+PATENT MARKING NOTICE:
+
+ This software is covered by US Patent No. 8,185,551.
+ This software is covered by US Patent No. 8,489,638.
+
+PATENT RIGHTS GRANT:
+
+ "THIS IMPLEMENTATION" means the copyrightable works distributed by
+ Tokutek as part of the Fractal Tree project.
+
+ "PATENT CLAIMS" means the claims of patents that are owned or
+ licensable by Tokutek, both currently or in the future; and that in
+ the absence of this license would be infringed by THIS
+ IMPLEMENTATION or by using or running THIS IMPLEMENTATION.
+
+ "PATENT CHALLENGE" shall mean a challenge to the validity,
+ patentability, enforceability and/or non-infringement of any of the
+ PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS.
+
+ Tokutek hereby grants to you, for the term and geographical scope of
+ the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free,
+ irrevocable (except as stated in this section) patent license to
+ make, have made, use, offer to sell, sell, import, transfer, and
+ otherwise run, modify, and propagate the contents of THIS
+ IMPLEMENTATION, where such license applies only to the PATENT
+ CLAIMS. This grant does not include claims that would be infringed
+ only as a consequence of further modifications of THIS
+ IMPLEMENTATION. If you or your agent or licensee institute or order
+ or agree to the institution of patent litigation against any entity
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ THIS IMPLEMENTATION constitutes direct or contributory patent
+ infringement, or inducement of patent infringement, then any rights
+ granted to you under this License shall terminate as of the date
+ such litigation is filed. If you or your agent or exclusive
+ licensee institute or order or agree to the institution of a PATENT
+ CHALLENGE, then Tokutek may terminate any rights granted to you
+ under this License.
+*/
+
+#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved."
+#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
+
+#include <iostream>
+#include "test.h"
+#include "locktree.h"
+#include "lock_request.h"
+
+// Test FT-633, the data race on the lock request between ::start and ::retry
+// This test is non-deterministic. It uses sleeps at 2 critical places to
+// expose the data race on the lock request's state.
+
+namespace toku {
+
+struct locker_arg {
+ locktree *_lt;
+ TXNID _id;
+ const DBT *_key;
+
+ locker_arg(locktree *lt, TXNID id, const DBT *key) : _lt(lt), _id(id), _key(key) {
+ }
+};
+
+static void locker_callback(void) {
+ usleep(10000);
+}
+
+static void run_locker(locktree *lt, TXNID txnid, const DBT *key) {
+ int i;
+ for (i = 0; i < 1000; i++) {
+
+ lock_request request;
+ request.create();
+
+ request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
+
+ // set the test callbacks
+ request.set_start_test_callback(locker_callback);
+ request.set_retry_test_callback(locker_callback);
+
+ // try to acquire the lock
+ int r = request.start();
+ if (r == DB_LOCK_NOTGRANTED) {
+ // wait for the lock to be granted
+ r = request.wait(10 * 1000);
+ }
+
+ if (r == 0) {
+ // release the lock
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(key, key);
+ lt->release_locks(txnid, &buffer);
+ buffer.destroy();
+
+ // retry pending lock requests
+ lock_request::retry_all_lock_requests(lt);
+ }
+
+ request.destroy();
+ memset(&request, 0xab, sizeof request);
+
+ toku_pthread_yield();
+ if ((i % 10) == 0)
+ std::cout << toku_pthread_self() << " " << i << std::endl;
+ }
+}
+
+static void *locker(void *v_arg) {
+ locker_arg *arg = static_cast<locker_arg *>(v_arg);
+ run_locker(arg->_lt, arg->_id, arg->_key);
+ return arg;
+}
+
+} /* namespace toku */
+
+int main(void) {
+ int r;
+
+ toku::locktree lt;
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, toku::dbt_comparator);
+
+ const DBT *one = toku::get_dbt(1);
+
+ const int n_workers = 2;
+ toku_pthread_t ids[n_workers];
+ for (int i = 0; i < n_workers; i++) {
+ toku::locker_arg *arg = new toku::locker_arg(&lt, i, one);
+ r = toku_pthread_create(&ids[i], nullptr, toku::locker, arg);
+ assert_zero(r);
+ }
+ for (int i = 0; i < n_workers; i++) {
+ void *ret;
+ r = toku_pthread_join(ids[i], &ret);
+ assert_zero(r);
+ toku::locker_arg *arg = static_cast<toku::locker_arg *>(ret);
+ delete arg;
+ }
+
+ lt.release_reference();
+ lt.destroy();
+ return 0;
+}
+
diff --git a/storage/tokudb/ft-index/src/ydb_row_lock.cc b/storage/tokudb/ft-index/src/ydb_row_lock.cc
index 5ca853d92d9..4dd527220ec 100644
--- a/storage/tokudb/ft-index/src/ydb_row_lock.cc
+++ b/storage/tokudb/ft-index/src/ydb_row_lock.cc
@@ -307,6 +307,7 @@ void toku_db_grab_write_lock (DB *db, DBT *key, TOKUTXN tokutxn) {
int r = request.start();
invariant_zero(r);
db_txn_note_row_lock(db, txn_anc, key, key);
+ request.destroy();
}
void toku_db_release_lt_key_ranges(DB_TXN *txn, txn_lt_key_ranges *ranges) {
diff --git a/storage/tokudb/ft-index/src/ydb_txn.cc b/storage/tokudb/ft-index/src/ydb_txn.cc
index dd428c4d502..ce06e78b23f 100644
--- a/storage/tokudb/ft-index/src/ydb_txn.cc
+++ b/storage/tokudb/ft-index/src/ydb_txn.cc
@@ -422,6 +422,11 @@ static int toku_txn_discard(DB_TXN *txn, uint32_t flags) {
return 0;
}
+static bool toku_txn_is_prepared(DB_TXN *txn) {
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ return toku_txn_get_state(ttxn) == TOKUTXN_PREPARING;
+}
+
static inline void txn_func_init(DB_TXN *txn) {
#define STXN(name) txn->name = locked_txn_ ## name
STXN(abort);
@@ -438,6 +443,7 @@ static inline void txn_func_init(DB_TXN *txn) {
SUTXN(discard);
#undef SUTXN
txn->id64 = toku_txn_id64;
+ txn->is_prepared = toku_txn_is_prepared;
}
//
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index 87c544c93e3..7fca80c28df 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -1774,7 +1774,7 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
// initialize cardinality info from the status dictionary
share->n_rec_per_key = tokudb::compute_total_key_parts(table_share);
- share->rec_per_key = (uint64_t *) tokudb_my_realloc(share->rec_per_key, share->n_rec_per_key * sizeof (uint64_t), MYF(MY_FAE));
+ share->rec_per_key = (uint64_t *) tokudb_my_realloc(share->rec_per_key, share->n_rec_per_key * sizeof (uint64_t), MYF(MY_FAE + MY_ALLOW_ZERO_PTR));
error = tokudb::get_card_from_status(share->status_block, txn, share->n_rec_per_key, share->rec_per_key);
if (error) {
for (uint i = 0; i < share->n_rec_per_key; i++)
@@ -6037,6 +6037,9 @@ int ha_tokudb::extra(enum ha_extra_function operation) {
case HA_EXTRA_NO_IGNORE_NO_KEY:
using_ignore_no_key = false;
break;
+ case HA_EXTRA_NOT_USED:
+ case HA_EXTRA_PREPARE_FOR_RENAME:
+ break; // must do nothing and return 0
default:
break;
}
@@ -6282,7 +6285,11 @@ int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) {
int error = 0;
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
- DBUG_ASSERT(trx);
+ if (!trx) {
+ error = create_tokudb_trx_data_instance(&trx);
+ if (error) { goto cleanup; }
+ thd_set_ha_data(thd, tokudb_hton, trx);
+ }
/*
note that trx->stmt may have been already initialized as start_stmt()
diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h
index 0fb6b2c6c36..ee4a0b7a02f 100644
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@ -839,7 +839,7 @@ static inline bool key_is_clustering(const KEY *key) {
#else
static inline bool key_is_clustering(const KEY *key) {
- return key->option_struct && key->option_struct->clustering;
+ return key->flags & HA_CLUSTERING;
}
#endif
diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc
index 1a03dc815a1..cae50446fa0 100644
--- a/storage/tokudb/ha_tokudb_alter_56.cc
+++ b/storage/tokudb/ha_tokudb_alter_56.cc
@@ -765,7 +765,9 @@ bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_i
#else
THD::killed_state saved_killed_state = thd->killed;
thd->killed = THD::NOT_KILLED;
- for (volatile uint i = 0; wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED); i++) {
+ // MySQL does not handle HA_EXTRA_NOT_USED so we use HA_EXTRA_PREPARE_FOR_RENAME since it is passed through
+ // the partition storage engine and is treated as a NOP by tokudb
+ for (volatile uint i = 0; wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME); i++) {
if (thd->killed != THD::NOT_KILLED)
thd->killed = THD::NOT_KILLED;
sleep(1);
diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc
index 37742227de2..99f9cc56037 100644
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@ -415,10 +415,10 @@ static int tokudb_init_func(void *p) {
tokudb_hton->commit = tokudb_commit;
tokudb_hton->rollback = tokudb_rollback;
#if TOKU_INCLUDE_XA
- tokudb_hton->prepare=tokudb_xa_prepare;
- tokudb_hton->recover=tokudb_xa_recover;
- tokudb_hton->commit_by_xid=tokudb_commit_by_xid;
- tokudb_hton->rollback_by_xid=tokudb_rollback_by_xid;
+ tokudb_hton->prepare = tokudb_xa_prepare;
+ tokudb_hton->recover = tokudb_xa_recover;
+ tokudb_hton->commit_by_xid = tokudb_commit_by_xid;
+ tokudb_hton->rollback_by_xid = tokudb_rollback_by_xid;
#endif
tokudb_hton->table_options= tokudb_table_options;
@@ -775,16 +775,35 @@ static void tokudb_cleanup_handlers(tokudb_trx_data *trx, DB_TXN *txn) {
}
}
+#if MYSQL_VERSION_ID >= 50600
+extern "C" enum durability_properties thd_get_durability_property(const MYSQL_THD thd);
+#endif
+
+// Determine if an fsync is used when a transaction is committed.
+static bool tokudb_fsync_on_commit(THD *thd, tokudb_trx_data *trx, DB_TXN *txn) {
+#if MYSQL_VERSION_ID >= 50600
+ // Check the client durability property which is set during 2PC
+ if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
+ return false;
+#endif
+#if defined(MARIADB_BASE_VERSION)
+    // Check if the txn is prepared and the binlog is open
+ if (txn->is_prepared(txn) && mysql_bin_log.is_open())
+ return false;
+#endif
+ return THDVAR(thd, commit_sync) != 0;
+}
+
static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
TOKUDB_DBUG_ENTER("");
DBUG_PRINT("trans", ("ending transaction %s", all ? "all" : "stmt"));
- uint32_t syncflag = THDVAR(thd, commit_sync) ? 0 : DB_TXN_NOSYNC;
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
+ uint32_t syncflag = tokudb_fsync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
if (tokudb_debug & TOKUDB_DEBUG_TXN) {
- TOKUDB_TRACE("commit trx %u txn %p", all, this_txn);
+ TOKUDB_TRACE("commit trx %u txn %p syncflag %u", all, this_txn, syncflag);
}
// test hook to induce a crash on a debug build
DBUG_EXECUTE_IF("tokudb_crash_commit_before", DBUG_SUICIDE(););
@@ -838,7 +857,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
TOKUDB_DBUG_ENTER("");
int r = 0;
- /* if support_xa is disable, just return */
+    // if tokudb_support_xa is disabled, just return
if (!THDVAR(thd, support_xa)) {
TOKUDB_DBUG_RETURN(r);
}
@@ -1598,12 +1617,12 @@ static ST_FIELD_INFO tokudb_fractal_tree_info_field_info[] = {
static int tokudb_report_fractal_tree_info_for_db(const DBT *dname, const DBT *iname, TABLE *table, THD *thd) {
int error;
- DB *db;
uint64_t bt_num_blocks_allocated;
uint64_t bt_num_blocks_in_use;
uint64_t bt_size_allocated;
uint64_t bt_size_in_use;
+ DB *db = NULL;
error = db_create(&db, db_env, 0);
if (error) {
goto exit;
@@ -1615,12 +1634,6 @@ static int tokudb_report_fractal_tree_info_for_db(const DBT *dname, const DBT *i
error = db->get_fractal_tree_info64(db,
&bt_num_blocks_allocated, &bt_num_blocks_in_use,
&bt_size_allocated, &bt_size_in_use);
- {
- int close_error = db->close(db, 0);
- if (!error) {
- error = close_error;
- }
- }
if (error) {
goto exit;
}
@@ -1652,6 +1665,11 @@ static int tokudb_report_fractal_tree_info_for_db(const DBT *dname, const DBT *i
error = schema_table_store_record(thd, table);
exit:
+ if (db) {
+ int close_error = db->close(db, 0);
+ if (error == 0)
+ error = close_error;
+ }
return error;
}
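The tokudb_report_fractal_tree_info_for_db change above is a cleanup-path fix: db starts out NULL and is closed once in the exit: label, so every early goto exit (including the error returns that previously skipped the close) still releases the handle, and a close error only surfaces when nothing else failed first. A minimal hedged sketch of that single-exit pattern; the demo_* calls are placeholders for db_create() and db->close():

```cpp
#include <cstddef>

struct DemoDb {};
static int demo_create(DemoDb **out) { *out = new DemoDb(); return 0; }
static int demo_close(DemoDb *db)    { delete db; return 0; }
static int demo_work(DemoDb *)       { return 0; }

// Single exit: every failure path reaches 'exit', which closes the handle
// if (and only if) it was created; a close error never masks an earlier one.
static int report_info_sketch()
{
  int error;
  DemoDb *db = NULL;            // NULL means "nothing to close yet"

  error = demo_create(&db);
  if (error)
    goto exit;

  error = demo_work(db);
  if (error)
    goto exit;

exit:
  if (db)
  {
    int close_error = demo_close(db);
    if (error == 0)
      error = close_error;      // report the close error only if we had none
  }
  return error;
}
```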
diff --git a/storage/tokudb/mysql-test/tokudb/r/change_column_varchar.result b/storage/tokudb/mysql-test/tokudb/r/change_column_varchar.result
index 7f3d4265662..5018e2db278 100644
--- a/storage/tokudb/mysql-test/tokudb/r/change_column_varchar.result
+++ b/storage/tokudb/mysql-test/tokudb/r/change_column_varchar.result
@@ -6,6 +6,7 @@ Table Create Table
t CREATE TABLE `t` (
`a` varchar(1) DEFAULT NULL
) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
+INSERT INTO t VALUES (null);
ALTER TABLE t CHANGE COLUMN a a VARCHAR(2);
ALTER TABLE t CHANGE COLUMN a a VARCHAR(2);
ALTER TABLE t CHANGE COLUMN a a VARCHAR(3);
diff --git a/storage/tokudb/mysql-test/tokudb/r/information-schema-global-status.result b/storage/tokudb/mysql-test/tokudb/r/information-schema-global-status.result
index 369c14fe4fe..7d0e83260e1 100644
--- a/storage/tokudb/mysql-test/tokudb/r/information-schema-global-status.result
+++ b/storage/tokudb/mysql-test/tokudb/r/information-schema-global-status.result
@@ -65,6 +65,7 @@ TOKUDB_CHECKPOINT_LONG_BEGIN_COUNT
TOKUDB_CHECKPOINT_LONG_BEGIN_TIME
TOKUDB_CHECKPOINT_PERIOD
TOKUDB_CHECKPOINT_TAKEN
+TOKUDB_CURSOR_SKIP_DELETED_LEAF_ENTRY
TOKUDB_DB_CLOSES
TOKUDB_DB_OPENS
TOKUDB_DB_OPEN_CURRENT
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_create_select.test b/storage/tokudb/mysql-test/tokudb/t/bf_create_select.test
index 20455da874f..8c457b75f27 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_create_select.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_create_select.test
@@ -65,8 +65,8 @@ while ($i < $maxq) {
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# Check that the time with bulk fetch off is at least twice that whith bulk fetch on
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# Check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_elapsed_on $time_elapsed_off; }
if (!$verdict) { echo index $time_elapsed_on $time_elapsed_off; }
@@ -94,8 +94,8 @@ while ($i < $maxq) {
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# Check that the time with bulk fetch off is at least twice that whith bulk fetch on
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# Check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_elapsed_on $time_elapsed_off; }
if (!$verdict) { echo range $time_elapsed_on $time_elapsed_off; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_create_select_hash_part.test b/storage/tokudb/mysql-test/tokudb/t/bf_create_select_hash_part.test
index 4df0a138be8..34995b4ba2f 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_create_select_hash_part.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_create_select_hash_part.test
@@ -65,8 +65,8 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# check that bulk fetch on is at least 1.5 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 1.5 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -93,8 +93,8 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# check that bulk fetch on is at least 1.5 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 1.5 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_create_select_range_part.test b/storage/tokudb/mysql-test/tokudb/t/bf_create_select_range_part.test
index b66b2c42808..a53249893f4 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_create_select_range_part.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_create_select_range_part.test
@@ -72,8 +72,8 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -100,8 +100,8 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_create_temp_select.test b/storage/tokudb/mysql-test/tokudb/t/bf_create_temp_select.test
index 52cb886a410..4f8211a51d9 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_create_temp_select.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_create_temp_select.test
@@ -65,8 +65,8 @@ while ($i < $maxq) {
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# Check that the time with bulk fetch off is at least twice that whith bulk fetch on
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# Check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_elapsed_on $time_elapsed_off; }
if (!$verdict) { echo index $time_elapsed_on $time_elapsed_off; }
@@ -94,8 +94,8 @@ while ($i < $maxq) {
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# Check that the time with bulk fetch off is at least twice that whith bulk fetch on
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# Check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_elapsed_on $time_elapsed_off; }
if (!$verdict) { echo range $time_elapsed_on $time_elapsed_off; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_delete.test b/storage/tokudb/mysql-test/tokudb/t/bf_delete.test
index f98e87f1ec1..a55d78784cc 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_delete.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_delete.test
@@ -1,5 +1,4 @@
-# Verify that index scans for delete statements use bulk fetch and are
-# at least twice as fast
+# Verify that index scans for delete statements use bulk fetch and are faster than when not using bulk fetch
source include/have_tokudb.inc;
source include/big_test.inc;
@@ -62,8 +61,8 @@ while ($i < $maxq) {
}
let $time_elapsed_bf_off = `select unix_timestamp() - $s`;
-# verify that a delete scan with bulk fetch ON is at least 2 times faster than with bulk fetch OFF
-let $verdict = `select $time_elapsed_bf_on > 0 && $time_elapsed_bf_off >= 2 * $time_elapsed_bf_on`;
+# verify that the time for a delete scan with bulk fetch OFF is greater than with bulk fetch ON
+let $verdict = `select $time_elapsed_bf_on > 0 && $time_elapsed_bf_off > $time_elapsed_bf_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_elapsed_bf_on $time_elapsed_bf_off; }
if (!$verdict) { echo range $time_elapsed_bf_on $time_elapsed_bf_off; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_insert_select.test b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select.test
index 5fcb8fa58b5..8d96c26bc29 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_insert_select.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select.test
@@ -66,8 +66,8 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -92,7 +92,7 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_dup_key.test b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_dup_key.test
index 51c6d66d706..1b015b2c272 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_dup_key.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_dup_key.test
@@ -70,8 +70,8 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -96,8 +96,8 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_replace_select.test b/storage/tokudb/mysql-test/tokudb/t/bf_replace_select.test
index 1ca754454eb..72cda349f80 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_replace_select.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_replace_select.test
@@ -66,8 +66,8 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -92,7 +92,8 @@ while ($i < $maxq) {
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_select_hash_part.test b/storage/tokudb/mysql-test/tokudb/t/bf_select_hash_part.test
index 2271a2086be..46986278743 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_select_hash_part.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_select_hash_part.test
@@ -65,8 +65,8 @@ let $time_bf_off = `select unix_timestamp() - $s`;
if ($debug) { echo index scans took $time_bf_off.; }
-# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
-let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
+# check that the scan time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_bf_on > 0 && $time_bf_off > $time_bf_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_bf_on $time_bf_off; }
if (!$verdict) { echo index $time_bf_on $time_bf_off; }
@@ -93,8 +93,8 @@ let $time_bf_off = `select unix_timestamp() - $s`;
if ($debug) { echo range scans took $time_bf_off.; }
-# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
-let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
+# check that the scan time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_bf_on > 0 && $time_bf_off > $time_bf_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_bf_on $time_bf_off; }
if (!$verdict) { echo range $time_bf_on $time_bf_off; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_select_range_part.test b/storage/tokudb/mysql-test/tokudb/t/bf_select_range_part.test
index 9dcb044d4d4..7e608777798 100644
--- a/storage/tokudb/mysql-test/tokudb/t/bf_select_range_part.test
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_select_range_part.test
@@ -70,8 +70,8 @@ while ($i < $maxq) {
}
let $time_bf_off = `select unix_timestamp() - $s`;
-# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
-let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
+# check that the scan time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_bf_on > 0 && $time_bf_off > $time_bf_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_bf_on $time_bf_off; }
if (!$verdict) { echo index scan $time_bf_on $time_bf_off; }
@@ -94,8 +94,8 @@ while ($i < $maxq) {
}
let $time_bf_off = `select unix_timestamp() - $s`;
-# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
-let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
+# check that the scan time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_bf_on > 0 && $time_bf_off > $time_bf_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_bf_on $time_bf_off; }
if (!$verdict) { echo range $time_bf_on $time_bf_off; }
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_varchar.test b/storage/tokudb/mysql-test/tokudb/t/change_column_varchar.test
index f2b4e3cbf78..6543952ad29 100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_varchar.test
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_varchar.test
@@ -9,6 +9,7 @@ SET SESSION TOKUDB_DISABLE_SLOW_ALTER=ON;
CREATE TABLE t (a VARCHAR(1)) ENGINE=TokuDB;
SHOW CREATE TABLE t;
+INSERT INTO t VALUES (null);
# 1->1
let $i=1
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/disabled.def b/storage/tokudb/mysql-test/tokudb_bugs/disabled.def
index 0bf13a5e86e..86d38a530da 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/disabled.def
+++ b/storage/tokudb/mysql-test/tokudb_bugs/disabled.def
@@ -9,3 +9,6 @@ fileops-3: how this could work, if alter needs an exclusive mdl lock?
checkpoint_lock_2: test can not work when the checkpoint_safe_lock is a fair rwlock
+# this test was added in 7.5.4 and fails in 10.0
+# but running this very test in 7.5.3 fails in exactly the same manner
+db768: never worked. tokutek was informed, 2015-01-14
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db762.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db762.result
new file mode 100644
index 00000000000..159957dba3e
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db762.result
@@ -0,0 +1,7 @@
+drop table if exists t1,t2;
+create table t1 (x int) engine=innodb;
+lock table t1 read;
+create temporary table t2 (x int) engine=tokudb;
+insert into t2 values (1);
+unlock tables;
+drop table t1, t2;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db766.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db766.result
new file mode 100644
index 00000000000..e109b0a7f15
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db766.result
@@ -0,0 +1,7 @@
+set default_storage_engine=TokuDB;
+drop table if exists t1;
+CREATE TABLE t1(c1 INT,c2 CHAR)PARTITION BY KEY(c1) PARTITIONS 5;
+insert INTO t1 values(1,1),(2,1),(2,2),(2,3);
+ALTER TABLE t1 ADD UNIQUE INDEX i1(c1);
+ERROR 23000: Can't write; duplicate key in table 't1'
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db768.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db768.result
new file mode 100644
index 00000000000..f302114b45c
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db768.result
@@ -0,0 +1,10 @@
+set default_storage_engine='tokudb';
+drop table if exists t;
+create table t (id int primary key);
+set autocommit=OFF;
+lock tables t write;
+optimize table t;
+Table Op Msg_type Msg_text
+test.t optimize status OK
+unlock tables;
+drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db771.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db771.result
new file mode 100644
index 00000000000..3fc012a732f
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db771.result
@@ -0,0 +1,11 @@
+set default_storage_engine=tokudb;
+drop table if exists t1;
+CREATE TABLE t1 (a int key, b varchar(32), c varchar(32));
+REPLACE t1 SET a = 4;
+ALTER TABLE t1 CHANGE COLUMN c c VARCHAR(500);
+update t1 set b='hi';
+update t1 set c='there';
+select * from t1;
+a b c
+4 hi there
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/5585-master.opt b/storage/tokudb/mysql-test/tokudb_bugs/t/5585-master.opt
new file mode 100644
index 00000000000..017432e797d
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/5585-master.opt
@@ -0,0 +1 @@
+--tokudb-cache-size=1000000000 --innodb-buffer-pool-size=1000000000
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db762.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db762.test
new file mode 100644
index 00000000000..4428e9df8ec
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db762.test
@@ -0,0 +1,13 @@
+# test for DB-762 and DB-767
+source include/have_tokudb.inc;
+source include/have_innodb.inc;
+disable_warnings;
+drop table if exists t1,t2;
+enable_warnings;
+create table t1 (x int) engine=innodb;
+lock table t1 read;
+create temporary table t2 (x int) engine=tokudb;
+insert into t2 values (1);
+unlock tables;
+drop table t1, t2;
+
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db766.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db766.test
new file mode 100644
index 00000000000..3aad3404296
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db766.test
@@ -0,0 +1,12 @@
+# reproducer for DB-766
+source include/have_tokudb.inc;
+source include/have_partition.inc;
+set default_storage_engine=TokuDB;
+disable_warnings;
+drop table if exists t1;
+enable_warnings;
+CREATE TABLE t1(c1 INT,c2 CHAR)PARTITION BY KEY(c1) PARTITIONS 5;
+insert INTO t1 values(1,1),(2,1),(2,2),(2,3);
+--error ER_DUP_KEY
+ALTER TABLE t1 ADD UNIQUE INDEX i1(c1);
+drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db768.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db768.test
new file mode 100644
index 00000000000..be2155f5c18
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db768.test
@@ -0,0 +1,12 @@
+# test case for DB-768
+source include/have_tokudb.inc;
+set default_storage_engine='tokudb';
+disable_warnings;
+drop table if exists t;
+enable_warnings;
+create table t (id int primary key);
+set autocommit=OFF;
+lock tables t write;
+optimize table t;
+unlock tables;
+drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db771.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db771.test
new file mode 100644
index 00000000000..039ad7471c1
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db771.test
@@ -0,0 +1,13 @@
+# test case for DB-771
+source include/have_tokudb.inc;
+set default_storage_engine=tokudb;
+disable_warnings;
+drop table if exists t1;
+enable_warnings;
+CREATE TABLE t1 (a int key, b varchar(32), c varchar(32));
+REPLACE t1 SET a = 4;
+ALTER TABLE t1 CHANGE COLUMN c c VARCHAR(500);
+update t1 set b='hi';
+update t1 set c='there';
+select * from t1;
+drop table t1;
diff --git a/storage/tokudb/tokudb_update_fun.cc b/storage/tokudb/tokudb_update_fun.cc
index 3ab7510121a..9754dc989a2 100644
--- a/storage/tokudb/tokudb_update_fun.cc
+++ b/storage/tokudb/tokudb_update_fun.cc
@@ -851,7 +851,7 @@ static int tokudb_expand_variable_offsets(
DBT new_val; memset(&new_val, 0, sizeof new_val);
if (old_val != NULL) {
- assert(offset_start + number_of_offsets < old_val->size);
+ assert(offset_start + number_of_offsets <= old_val->size);
// compute the new val from the old val
uchar *old_val_ptr = (uchar *)old_val->data;
diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc
index 64f813f282d..323bb468527 100644
--- a/storage/xtradb/btr/btr0cur.cc
+++ b/storage/xtradb/btr/btr0cur.cc
@@ -2519,6 +2519,38 @@ btr_cur_pess_upd_restore_supremum(
}
/*************************************************************//**
+Check if the total length of the modified blob for the row is within 10%
+of the total redo log size. This constraint on the blob length is to
+avoid overwriting the redo logs beyond the last checkpoint lsn.
+@return DB_SUCCESS or DB_TOO_BIG_RECORD. */
+static
+dberr_t
+btr_check_blob_limit(const big_rec_t* big_rec_vec)
+{
+ const ib_uint64_t redo_size = srv_n_log_files * srv_log_file_size
+ * UNIV_PAGE_SIZE;
+ const ulint redo_10p = redo_size / 10;
+ ulint total_blob_len = 0;
+ dberr_t err = DB_SUCCESS;
+
+ /* Calculate the total number of bytes for blob data */
+ for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
+ total_blob_len += big_rec_vec->fields[i].len;
+ }
+
+ if (total_blob_len > redo_10p) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data"
+ " length (" ULINTPF ") is greater than"
+ " 10%% of the total redo log size (" UINT64PF
+ "). Please increase total redo log size.",
+ total_blob_len, redo_size);
+ err = DB_TOO_BIG_RECORD;
+ }
+
+ return(err);
+}
+
+/*************************************************************//**
Performs an update of a record on a page of a tree. It is assumed
that mtr holds an x-latch on the tree and on the cursor page. If the
update is made on the leaf level, to avoid deadlocks, mtr must also
@@ -2756,26 +2788,14 @@ make_external:
}
if (big_rec_vec) {
- const ulint redo_10p = srv_log_file_size * UNIV_PAGE_SIZE / 10;
- ulint total_blob_len = 0;
- /* Calculate the total number of bytes for blob data */
- for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
- total_blob_len += big_rec_vec->fields[i].len;
- }
+ err = btr_check_blob_limit(big_rec_vec);
- if (total_blob_len > redo_10p) {
- ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data"
- " length (" ULINTPF ") is greater than"
- " 10%% of the redo log file size (" UINT64PF
- "). Please increase innodb_log_file_size.",
- total_blob_len, srv_log_file_size);
+ if (err != DB_SUCCESS) {
if (n_reserved > 0) {
fil_space_release_free_extents(
index->space, n_reserved);
}
-
- err = DB_TOO_BIG_RECORD;
goto err_exit;
}
}
@@ -4035,7 +4055,7 @@ btr_estimate_number_of_different_key_vals(
ib_uint64_t* n_diff;
ib_uint64_t* n_not_null;
ibool stats_null_not_equal;
- ullint n_sample_pages; /* number of pages to sample */
+ ullint n_sample_pages=1; /* number of pages to sample */
ulint not_empty_flag = 0;
ulint total_external_size = 0;
ulint i;
@@ -4088,25 +4108,57 @@ btr_estimate_number_of_different_key_vals(
if (srv_stats_transient_sample_pages > index->stat_index_size) {
if (index->stat_index_size > 0) {
n_sample_pages = index->stat_index_size;
- } else {
- n_sample_pages = 1;
}
} else {
n_sample_pages = srv_stats_transient_sample_pages;
}
} else {
- /* New logaritmic number of pages that are estimated. We
- first pick minimun from srv_stats_transient_sample_pages and number of
- pages on index. Then we pick maximum from previous number of
- pages and log2(number of index pages) * srv_stats_transient_sample_pages. */
- if (index->stat_index_size > 0) {
- n_sample_pages = ut_max(ut_min(srv_stats_transient_sample_pages, index->stat_index_size),
- log2(index->stat_index_size)*srv_stats_transient_sample_pages);
- } else {
- n_sample_pages = 1;
+		/* New logarithmic number of pages that are estimated.
+		The number of pages estimated should be between 1 and
+		index->stat_index_size.
+
+		If we have only 0 or 1 index pages then we can only take 1
+		sample. We have already initialized n_sample_pages to 1.
+
+		So, taking the index size as I, the sample count as S and log2(I)*S as L:
+
+		requirement 1) the result of the expression must not exceed I;
+		requirement 2) ideally at least S pages should be sampled;
+		so the current expression is min(I, max(min(S, I), L)).
+
+		Looking for simplifications:
+
+		case 1: assume S < I
+		min(I, max(min(S, I), L)) -> min(I, max(S, L))
+
+		but since L = log2(I)*S and log2(I) >= 1, L >= S, so max(S, L) = L.
+
+		so we have: min(I, L)
+
+		case 2: assume I < S
+		min(I, max(min(S, I), L)) -> min(I, max(I, L))
+
+		case 2a: L > I
+		min(I, max(I, L)) -> min(I, L) -> I
+
+		case 2b: L < I
+		min(I, max(I, L)) -> min(I, I) -> I
+
+		so every case 2 path gives I, and our expression is:
+		n_pages = S < I ? min(I, L) : I
+		*/
+ if (index->stat_index_size > 1) {
+ n_sample_pages = (srv_stats_transient_sample_pages < index->stat_index_size) ?
+ ut_min(index->stat_index_size,
+ log2(index->stat_index_size)*srv_stats_transient_sample_pages)
+ : index->stat_index_size;
+
}
}
+ /* Sanity check */
+ ut_ad(n_sample_pages > 0 && n_sample_pages <= (index->stat_index_size < 1 ? 1 : index->stat_index_size));
+
/* We sample some pages in the index to get an estimate */
for (i = 0; i < n_sample_pages; i++) {
@@ -4644,7 +4696,6 @@ btr_store_big_rec_extern_fields(
buf_block_t** freed_pages = NULL;
ulint n_freed_pages = 0;
dberr_t error = DB_SUCCESS;
- ulint total_blob_len = 0;
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_any_extern(offsets));
@@ -4664,21 +4715,11 @@ btr_store_big_rec_extern_fields(
rec_page_no = buf_block_get_page_no(rec_block);
ut_a(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX);
- const ulint redo_10p = (srv_log_file_size * UNIV_PAGE_SIZE / 10);
-
- /* Calculate the total number of bytes for blob data */
- for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
- total_blob_len += big_rec_vec->fields[i].len;
- }
+ error = btr_check_blob_limit(big_rec_vec);
- if (total_blob_len > redo_10p) {
+ if (error != DB_SUCCESS) {
ut_ad(op == BTR_STORE_INSERT);
- ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data length"
- " (" ULINTPF ") is greater than 10%% of the"
- " redo log file size (" UINT64PF "). Please"
- " increase innodb_log_file_size.",
- total_blob_len, srv_log_file_size);
- return(DB_TOO_BIG_RECORD);
+ return(error);
}
if (page_zip) {
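
The btr_check_blob_limit() helper factored out above replaces two copies of the same check and now measures a row's BLOB payload against 10% of the combined size of all redo log files (srv_n_log_files * srv_log_file_size * UNIV_PAGE_SIZE) instead of a single file. A rough standalone sketch of that arithmetic; the configuration values below are assumptions for illustration, not values taken from this patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Assumed configuration: 2 redo log files of 3072 pages each, 16 KiB pages.
        const uint64_t srv_n_log_files   = 2;
        const uint64_t srv_log_file_size = 3072;        // log file size in pages
        const uint64_t univ_page_size    = 16 * 1024;

        const uint64_t redo_size = srv_n_log_files * srv_log_file_size * univ_page_size;
        const uint64_t redo_10p  = redo_size / 10;      // the limit btr_check_blob_limit() enforces

        const uint64_t total_blob_len = 12ull * 1024 * 1024;  // hypothetical 12 MiB of BLOB columns
        if (total_blob_len > redo_10p) {                       // 12 MiB > ~9.6 MiB of a 96 MiB redo log
            std::printf("DB_TOO_BIG_RECORD: blob bytes %llu exceed %llu\n",
                        (unsigned long long) total_blob_len,
                        (unsigned long long) redo_10p);
        }
        return 0;
    }

With the old per-file check, enlarging innodb_log_file_size was the only remedy suggested; the new message points at the total redo log size instead.
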
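
The rewritten transient-statistics sampling in btr_estimate_number_of_different_key_vals() reduces, per its own derivation, to n_pages = S < I ? min(I, log2(I)*S) : I, with a floor of one page. A compilable sketch of just that expression; the inputs below are invented, and ut_min/log2 are replaced by their standard-library counterparts:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // S: srv_stats_transient_sample_pages, I: index->stat_index_size (both assumed here).
    static unsigned long long sample_pages(unsigned long long S, unsigned long long I) {
        if (I <= 1) {
            return 1;                    // 0 or 1 index pages: take a single sample
        }
        if (S < I) {
            // min(I, log2(I) * S), as in the new branch above
            return (unsigned long long) std::min<double>((double) I,
                                                         std::log2((double) I) * (double) S);
        }
        return I;                        // index smaller than the requested sample count
    }

    int main() {
        std::printf("%llu\n", sample_pages(8, 1000));  // log2(1000)*8 ~ 79.7 -> 79 pages
        std::printf("%llu\n", sample_pages(8, 4));     // I < S               -> 4 pages
        std::printf("%llu\n", sample_pages(8, 1));     // degenerate index    -> 1 page
        return 0;
    }

Each result stays within the bounds asserted by the new ut_ad(): at least 1 and at most index->stat_index_size.
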
diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc
index abb4a6f3ab9..d6f210e1430 100644
--- a/storage/xtradb/buf/buf0buf.cc
+++ b/storage/xtradb/buf/buf0buf.cc
@@ -634,9 +634,14 @@ buf_page_is_corrupted(
checksum_field2 = mach_read_from_4(
read_buf + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM);
+#if FIL_PAGE_LSN % 8
+#error "FIL_PAGE_LSN must be 64 bit aligned"
+#endif
+
/* declare empty pages non-corrupted */
if (checksum_field1 == 0 && checksum_field2 == 0
- && mach_read_from_4(read_buf + FIL_PAGE_LSN) == 0) {
+ && *reinterpret_cast<const ib_uint64_t*>(read_buf +
+ FIL_PAGE_LSN) == 0) {
/* make sure that the page is really empty */
for (ulint i = 0; i < UNIV_PAGE_SIZE; i++) {
if (read_buf[i] != 0) {
@@ -1691,8 +1696,9 @@ buf_pool_watch_is_sentinel(
/****************************************************************//**
Add watch for the given page to be read in. Caller must have
-appropriate hash_lock for the bpage. This function may release the
-hash_lock and reacquire it.
+appropriate hash_lock for the bpage and hold the LRU list mutex to avoid a race
+condition with buf_LRU_free_page inserting the same page into the page hash.
+This function may release the hash_lock and reacquire it.
@return NULL if watch set, block if the page is in the buffer pool */
UNIV_INTERN
buf_page_t*
@@ -1707,6 +1713,8 @@ buf_pool_watch_set(
buf_pool_t* buf_pool = buf_pool_get(space, offset);
prio_rw_lock_t* hash_lock;
+ ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+
hash_lock = buf_page_hash_lock_get(buf_pool, fold);
#ifdef UNIV_SYNC_DEBUG
@@ -1775,6 +1783,7 @@ page_found:
bpage->space = static_cast<ib_uint32_t>(space);
bpage->offset = static_cast<ib_uint32_t>(offset);
bpage->buf_fix_count = 1;
+ bpage->buf_pool_index = buf_pool_index(buf_pool);
mutex_exit(&buf_pool->zip_mutex);
@@ -2721,9 +2730,11 @@ loop:
/* Page not in buf_pool: needs to be read from file */
if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
+ mutex_enter(&buf_pool->LRU_list_mutex);
rw_lock_x_lock(hash_lock);
block = (buf_block_t*) buf_pool_watch_set(
space, offset, fold);
+ mutex_exit(&buf_pool->LRU_list_mutex);
if (UNIV_LIKELY_NULL(block)) {
/* We can release hash_lock after we
@@ -3055,15 +3066,19 @@ got_block:
if (buf_LRU_free_page(&fix_block->page, true)) {
mutex_exit(fix_mutex);
- rw_lock_x_lock(hash_lock);
if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
+ mutex_enter(&buf_pool->LRU_list_mutex);
+ rw_lock_x_lock(hash_lock);
+
/* Set the watch, as it would have
been set if the page were not in the
buffer pool in the first place. */
block = (buf_block_t*) buf_pool_watch_set(
space, offset, fold);
+ mutex_exit(&buf_pool->LRU_list_mutex);
} else {
+ rw_lock_x_lock(hash_lock);
block = (buf_block_t*) buf_page_hash_get_low(
buf_pool, space, offset, fold);
}
diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc
index d7d5e147cf3..ab48e4523b3 100644
--- a/storage/xtradb/buf/buf0flu.cc
+++ b/storage/xtradb/buf/buf0flu.cc
@@ -831,39 +831,35 @@ buf_flush_init_for_writing(
case SRV_CHECKSUM_ALGORITHM_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
checksum = buf_calc_page_crc32(page);
+ mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
break;
case SRV_CHECKSUM_ALGORITHM_INNODB:
case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
checksum = (ib_uint32_t) buf_calc_page_new_checksum(page);
+ mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
+ checksum = (ib_uint32_t) buf_calc_page_old_checksum(page);
break;
case SRV_CHECKSUM_ALGORITHM_NONE:
case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
checksum = BUF_NO_CHECKSUM_MAGIC;
+ mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
break;
/* no default so the compiler will emit a warning if new enum
is added and not handled here */
}
- mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
-
- /* We overwrite the first 4 bytes of the end lsn field to store
- the old formula checksum. Since it depends also on the field
- FIL_PAGE_SPACE_OR_CHKSUM, it has to be calculated after storing the
- new formula checksum. */
-
- if (srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB
- || srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_INNODB) {
+ /* With the InnoDB checksum, we overwrite the first 4 bytes of
+ the end lsn field to store the old formula checksum. Since it
+ depends also on the field FIL_PAGE_SPACE_OR_CHKSUM, it has to
+ be calculated after storing the new formula checksum.
- checksum = (ib_uint32_t) buf_calc_page_old_checksum(page);
-
- /* In other cases we use the value assigned from above.
- If CRC32 is used then it is faster to use that checksum
- (calculated above) instead of calculating another one.
- We can afford to store something other than
- buf_calc_page_old_checksum() or BUF_NO_CHECKSUM_MAGIC in
- this field because the file will not be readable by old
- versions of MySQL/InnoDB anyway (older than MySQL 5.6.3) */
- }
+ In other cases we write the same value to both fields.
+ If CRC32 is used then it is faster to use that checksum
+ (calculated above) instead of calculating another one.
+ We can afford to store something other than
+ buf_calc_page_old_checksum() or BUF_NO_CHECKSUM_MAGIC in
+ this field because the file will not be readable by old
+ versions of MySQL/InnoDB anyway (older than MySQL 5.6.3) */
mach_write_to_4(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM,
checksum);
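
The reshuffled buf_flush_init_for_writing() above writes the header checksum inside each switch case so that, for the INNODB algorithm, the old-formula trailer checksum is derived from a header that is already complete. A minimal sketch of that ordering constraint; fold26() stands in for buf_calc_page_old_checksum(), and the FIL_PAGE_* offsets are quoted from memory rather than from this patch:

    #include <cstdint>

    static const unsigned FIL_PAGE_SPACE_OR_CHKSUM    = 0;     // header checksum slot
    static const unsigned FIL_PAGE_FILE_FLUSH_LSN     = 26;    // old checksum folds bytes [0, 26)
    static const unsigned UNIV_PAGE_SIZE              = 16384;
    static const unsigned FIL_PAGE_END_LSN_OLD_CHKSUM = 8;     // 8-byte page trailer

    static uint32_t fold26(const unsigned char* p) {           // stand-in for ut_fold_binary()
        uint32_t h = 0;
        for (unsigned i = 0; i < FIL_PAGE_FILE_FLUSH_LSN; i++) h = h * 31 + p[i];
        return h;
    }

    static void write_be32(unsigned char* p, uint32_t v) {
        p[0] = (unsigned char)(v >> 24); p[1] = (unsigned char)(v >> 16);
        p[2] = (unsigned char)(v >> 8);  p[3] = (unsigned char) v;
    }

    void write_innodb_checksums(unsigned char* page, uint32_t new_checksum) {
        // 1) header first: the "new formula" checksum
        write_be32(page + FIL_PAGE_SPACE_OR_CHKSUM, new_checksum);
        // 2) trailer second: the "old formula" checksum covers the header bytes,
        //    including the field written in step 1, so it must be computed now
        write_be32(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM, fold26(page));
    }

    int main() {
        unsigned char page[UNIV_PAGE_SIZE] = {0};
        write_innodb_checksums(page, 0xdeadbeef);
        return 0;
    }
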
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 3a2c67f57fe..85f080cc6b8 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -39,6 +39,16 @@ UNIV_INTERN dict_index_t* dict_ind_redundant;
/** dummy index for ROW_FORMAT=COMPACT supremum and infimum records */
UNIV_INTERN dict_index_t* dict_ind_compact;
+#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+/** Flag to control insert buffer debugging. */
+extern UNIV_INTERN uint ibuf_debug;
+#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
+
+/**********************************************************************
+Issue a warning that the row is too big. */
+void
+ib_warn_row_too_big(const dict_table_t* table);
+
#ifndef UNIV_HOTBACKUP
#include "buf0buf.h"
#include "data0type.h"
@@ -2410,11 +2420,18 @@ dict_index_add_to_cache(
new_index->n_fields = new_index->n_def;
new_index->trx_id = index->trx_id;
- if (strict && dict_index_too_big_for_tree(table, new_index)) {
+ if (dict_index_too_big_for_tree(table, new_index)) {
+
+ if (strict) {
too_big:
- dict_mem_index_free(new_index);
- dict_mem_index_free(index);
- return(DB_TOO_BIG_RECORD);
+ dict_mem_index_free(new_index);
+ dict_mem_index_free(index);
+ return(DB_TOO_BIG_RECORD);
+ } else {
+
+ ib_warn_row_too_big(table);
+
+ }
}
if (dict_index_is_univ(index)) {
@@ -5747,11 +5764,11 @@ dict_set_corrupted(
dict_index_copy_types(tuple, sys_index, 2);
- btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_GE,
+ btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_LE,
BTR_MODIFY_LEAF,
&cursor, 0, __FILE__, __LINE__, &mtr);
- if (cursor.up_match == dtuple_get_n_fields(tuple)) {
+ if (cursor.low_match == dtuple_get_n_fields(tuple)) {
/* UPDATE SYS_INDEXES SET TYPE=index->type
WHERE TABLE_ID=index->table->id AND INDEX_ID=index->id */
ulint len;
diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc
index 759cdfc9f45..5d547edaaf9 100644
--- a/storage/xtradb/fil/fil0fil.cc
+++ b/storage/xtradb/fil/fil0fil.cc
@@ -55,6 +55,8 @@ Created 10/25/1995 Heikki Tuuri
static ulint srv_data_read, srv_data_written;
#endif /* !UNIV_HOTBACKUP */
+MYSQL_PLUGIN_IMPORT extern my_bool lower_case_file_system;
+
/*
IMPLEMENTATION OF THE TABLESPACE MEMORY CACHE
=============================================
@@ -1973,7 +1975,8 @@ UNIV_INTERN
ibool
fil_inc_pending_ops(
/*================*/
- ulint id) /*!< in: space id */
+ ulint id, /*!< in: space id */
+ ibool print_err) /*!< in: need to print error or not */
{
fil_space_t* space;
@@ -1982,10 +1985,12 @@ fil_inc_pending_ops(
space = fil_space_get_by_id(id);
if (space == NULL) {
- fprintf(stderr,
- "InnoDB: Error: trying to do an operation on a"
- " dropped tablespace %lu\n",
- (ulong) id);
+ if (print_err) {
+ fprintf(stderr,
+ "InnoDB: Error: trying to do an operation on a"
+ " dropped tablespace %lu\n",
+ (ulong) id);
+ }
}
if (space == NULL || space->stop_new_ops) {
@@ -4134,7 +4139,18 @@ fil_load_single_table_tablespace(
/* Build up the tablename in the standard form database/table. */
tablename = static_cast<char*>(
mem_alloc(dbname_len + filename_len + 2));
- sprintf(tablename, "%s/%s", dbname, filename);
+
+ /* When lower_case_table_names = 2 it is possible that the
+	dbname is in upper case, but while storing it in fil_space_t
+ we must convert it into lower case */
+ sprintf(tablename, "%s" , dbname);
+ tablename[dbname_len] = '\0';
+
+ if (lower_case_file_system) {
+ dict_casedn_str(tablename);
+ }
+
+ sprintf(tablename+dbname_len,"/%s",filename);
tablename_len = strlen(tablename) - strlen(".ibd");
tablename[tablename_len] = '\0';
diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc
index d57f1e09b54..37e742bf938 100644
--- a/storage/xtradb/fts/fts0fts.cc
+++ b/storage/xtradb/fts/fts0fts.cc
@@ -36,6 +36,7 @@ Full Text Search interface
#include "dict0priv.h"
#include "dict0stats.h"
#include "btr0pcur.h"
+#include <vector>
#include "ha_prototypes.h"
@@ -899,12 +900,14 @@ fts_drop_index(
index_cache = fts_find_index_cache(cache, index);
- if (index_cache->words) {
- fts_words_free(index_cache->words);
- rbt_free(index_cache->words);
- }
+ if (index_cache != NULL) {
+ if (index_cache->words) {
+ fts_words_free(index_cache->words);
+ rbt_free(index_cache->words);
+ }
- ib_vector_remove(cache->indexes, *(void**) index_cache);
+ ib_vector_remove(cache->indexes, *(void**) index_cache);
+ }
if (cache->get_docs) {
fts_reset_get_doc(cache);
@@ -1255,7 +1258,8 @@ fts_tokenizer_word_get(
#endif
/* If it is a stopword, do not index it */
- if (rbt_search(cache->stopword_info.cached_stopword,
+ if (cache->stopword_info.cached_stopword != NULL
+ && rbt_search(cache->stopword_info.cached_stopword,
&parent, text) == 0) {
return(NULL);
@@ -3558,6 +3562,12 @@ fts_add_doc_by_id(
rw_lock_x_lock(&table->fts->cache->lock);
+ if (table->fts->cache->stopword_info.status
+ & STOPWORD_NOT_INIT) {
+ fts_load_stopword(table, NULL, NULL,
+ NULL, TRUE, TRUE);
+ }
+
fts_cache_add_doc(
table->fts->cache,
get_doc->index_cache,
@@ -6072,8 +6082,6 @@ fts_update_hex_format_flag(
return (err);
}
-#ifdef _WIN32
-
/*********************************************************************//**
Rename an aux table to HEX format. It's called when "%016llu" is used
to format an object id in table name, which only happens in Windows. */
@@ -6170,8 +6178,8 @@ This function should make sure that either all the parent table and aux tables
are set DICT_TF2_FTS_AUX_HEX_NAME with flags2 or none of them are set */
static __attribute__((nonnull, warn_unused_result))
dberr_t
-fts_rename_aux_tables_to_hex_format(
-/*================================*/
+fts_rename_aux_tables_to_hex_format_low(
+/*====================================*/
trx_t* trx, /*!< in: transaction */
dict_table_t* parent_table, /*!< in: parent table */
ib_vector_t* tables) /*!< in: aux tables to rename. */
@@ -6295,12 +6303,14 @@ fts_rename_aux_tables_to_hex_format(
"table %s. Please revert manually.",
table->name);
fts_sql_rollback(trx_bg);
+ trx_free_for_background(trx_bg);
/* Continue to clear aux tables' flags2 */
not_rename = true;
continue;
}
fts_sql_commit(trx_bg);
+ trx_free_for_background(trx_bg);
}
DICT_TF2_FLAG_UNSET(parent_table, DICT_TF2_FTS_AUX_HEX_NAME);
@@ -6324,7 +6334,11 @@ fts_fake_hex_to_dec(
ret = sprintf(tmp_id, UINT64PFx, id);
ut_ad(ret == 16);
+#ifdef _WIN32
ret = sscanf(tmp_id, "%016llu", &dec_id);
+#else
+ ret = sscanf(tmp_id, "%016"PRIu64, &dec_id);
+#endif /* _WIN32 */
ut_ad(ret == 1);
return dec_id;
@@ -6346,7 +6360,293 @@ fts_check_aux_table_parent_id_cmp(
return static_cast<int>(fa1->parent_id - fa2->parent_id);
}
-#endif /* _WIN32 */
+/** Mark all the fts indexes associated with the parent table as corrupted.
+@param[in] trx transaction
+@param[in, out]	parent_table	fts indexes associated with this parent table
+ will be marked as corrupted. */
+static
+void
+fts_parent_all_index_set_corrupt(
+ trx_t* trx,
+ dict_table_t* parent_table)
+{
+ fts_t* fts = parent_table->fts;
+
+ if (trx_get_dict_operation(trx) == TRX_DICT_OP_NONE) {
+ trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
+ }
+
+ for (ulint j = 0; j < ib_vector_size(fts->indexes); j++) {
+ dict_index_t* index = static_cast<dict_index_t*>(
+ ib_vector_getp_const(fts->indexes, j));
+ dict_set_corrupted(index,
+ trx, "DROP ORPHANED TABLE");
+ }
+}
+
+/** Mark the fts index whose index id matches the given id as corrupted.
+@param[in] trx transaction
+@param[in] id index id to search
+@param[in, out] parent_table parent table to check with all
+			the indexes. */
+static
+void
+fts_set_index_corrupt(
+ trx_t* trx,
+ index_id_t id,
+ dict_table_t* table)
+{
+ fts_t* fts = table->fts;
+
+ if (trx_get_dict_operation(trx) == TRX_DICT_OP_NONE) {
+ trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
+ }
+
+ for (ulint j = 0; j < ib_vector_size(fts->indexes); j++) {
+ dict_index_t* index = static_cast<dict_index_t*>(
+ ib_vector_getp_const(fts->indexes, j));
+ if (index->id == id) {
+ dict_set_corrupted(index, trx,
+ "DROP ORPHANED TABLE");
+ break;
+ }
+ }
+}
+
+/** Check whether the index for the aux table is corrupted.
+@param[in] aux_table auxiliary table
+@retval nonzero if index is corrupted, zero for valid index */
+static
+ulint
+fts_check_corrupt_index(
+ fts_aux_table_t* aux_table)
+{
+ dict_table_t* table;
+ dict_index_t* index;
+ table = dict_table_open_on_id(
+ aux_table->parent_id, TRUE, DICT_TABLE_OP_NORMAL);
+
+ if (table == NULL) {
+ return(0);
+ }
+
+ for (index = UT_LIST_GET_FIRST(table->indexes);
+ index;
+ index = UT_LIST_GET_NEXT(indexes, index)) {
+ if (index->id == aux_table->index_id) {
+ ut_ad(index->type & DICT_FTS);
+ dict_table_close(table, true, false);
+ return(dict_index_is_corrupted(index));
+ }
+ }
+
+ dict_table_close(table, true, false);
+ return(0);
+}
+
+/** Check the validity of the parent table.
+@param[in] aux_table auxiliary table
+@return true if it is a valid table or false if it is not */
+static
+bool
+fts_valid_parent_table(
+ const fts_aux_table_t* aux_table)
+{
+ dict_table_t* parent_table;
+ bool valid = false;
+
+ parent_table = dict_table_open_on_id(
+ aux_table->parent_id, TRUE, DICT_TABLE_OP_NORMAL);
+
+ if (parent_table != NULL && parent_table->fts != NULL) {
+ if (aux_table->index_id == 0) {
+ valid = true;
+ } else {
+ index_id_t id = aux_table->index_id;
+ dict_index_t* index;
+
+ /* Search for the FT index in the table's list. */
+ for (index = UT_LIST_GET_FIRST(parent_table->indexes);
+ index;
+ index = UT_LIST_GET_NEXT(indexes, index)) {
+ if (index->id == id) {
+ valid = true;
+ break;
+ }
+
+ }
+ }
+ }
+
+ if (parent_table) {
+ dict_table_close(parent_table, TRUE, FALSE);
+ }
+
+ return(valid);
+}
+
+/** Try to rename all aux tables of the specified parent table.
+@param[in] aux_tables aux_tables to be renamed
+@param[in] parent_table parent table of all aux
+ tables stored in tables. */
+static
+void
+fts_rename_aux_tables_to_hex_format(
+ ib_vector_t* aux_tables,
+ dict_table_t* parent_table)
+{
+ dberr_t err;
+ trx_t* trx_rename = trx_allocate_for_background();
+ trx_rename->op_info = "Rename aux tables to hex format";
+ trx_rename->dict_operation_lock_mode = RW_X_LATCH;
+ trx_start_for_ddl(trx_rename, TRX_DICT_OP_TABLE);
+
+ err = fts_rename_aux_tables_to_hex_format_low(trx_rename,
+ parent_table, aux_tables);
+
+ trx_rename->dict_operation_lock_mode = 0;
+
+ if (err != DB_SUCCESS) {
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Rollback operations on all aux tables of table %s. "
+			"All the fts indexes associated with the table are "
+ "marked as corrupted. Please rebuild the "
+ "index again.", parent_table->name);
+ fts_sql_rollback(trx_rename);
+
+ /* Corrupting the fts index related to parent table. */
+ trx_t* trx_corrupt;
+ trx_corrupt = trx_allocate_for_background();
+ trx_corrupt->dict_operation_lock_mode = RW_X_LATCH;
+ trx_start_for_ddl(trx_corrupt, TRX_DICT_OP_TABLE);
+ fts_parent_all_index_set_corrupt(trx_corrupt, parent_table);
+ trx_corrupt->dict_operation_lock_mode = 0;
+ fts_sql_commit(trx_corrupt);
+ trx_free_for_background(trx_corrupt);
+ } else {
+ fts_sql_commit(trx_rename);
+ }
+
+ trx_free_for_background(trx_rename);
+ ib_vector_reset(aux_tables);
+}
+
+/** Set the hex format flag for the parent table.
+@param[in, out] parent_table parent table
+@param[in] trx transaction */
+static
+void
+fts_set_parent_hex_format_flag(
+ dict_table_t* parent_table,
+ trx_t* trx)
+{
+ if (!DICT_TF2_FLAG_IS_SET(parent_table,
+ DICT_TF2_FTS_AUX_HEX_NAME)) {
+ DBUG_EXECUTE_IF("parent_table_flag_fail",
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "Setting parent table %s to hex format "
+ "failed. Please try to restart the server "
+ "again, if it doesn't work, the system "
+ "tables might be corrupted.",
+ parent_table->name);
+ return;);
+
+ dberr_t err = fts_update_hex_format_flag(
+ trx, parent_table->id, true);
+
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_FATAL,
+ "Setting parent table %s to hex format "
+ "failed. Please try to restart the server "
+ "again, if it doesn't work, the system "
+ "tables might be corrupted.",
+ parent_table->name);
+ } else {
+ DICT_TF2_FLAG_SET(
+ parent_table, DICT_TF2_FTS_AUX_HEX_NAME);
+ }
+ }
+}
+
+/** Drop the obsolete auxiliary table.
+@param[in] tables tables to be dropped. */
+static
+void
+fts_drop_obsolete_aux_table_from_vector(
+ ib_vector_t* tables)
+{
+ dberr_t err;
+
+ for (ulint count = 0; count < ib_vector_size(tables);
+ ++count) {
+
+ fts_aux_table_t* aux_drop_table;
+ aux_drop_table = static_cast<fts_aux_table_t*>(
+ ib_vector_get(tables, count));
+ trx_t* trx_drop = trx_allocate_for_background();
+ trx_drop->op_info = "Drop obsolete aux tables";
+ trx_drop->dict_operation_lock_mode = RW_X_LATCH;
+ trx_start_for_ddl(trx_drop, TRX_DICT_OP_TABLE);
+
+ err = row_drop_table_for_mysql(
+ aux_drop_table->name, trx_drop, false, true);
+
+ trx_drop->dict_operation_lock_mode = 0;
+
+ if (err != DB_SUCCESS) {
+ /* We don't need to worry about the
+ failure, since server would try to
+ drop it on next restart, even if
+ the table was broken. */
+ ib_logf(IB_LOG_LEVEL_WARN,
+				"Failed to drop obsolete aux table '%s', which "
+				"is harmless. Will try to drop it on next "
+ "restart.", aux_drop_table->name);
+ fts_sql_rollback(trx_drop);
+ } else {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Dropped obsolete aux table '%s'.",
+ aux_drop_table->name);
+
+ fts_sql_commit(trx_drop);
+ }
+
+ trx_free_for_background(trx_drop);
+ }
+}
+
+/** Drop all the auxiliary tables present in the vector.
+@param[in] trx transaction
+@param[in] tables tables to be dropped */
+static
+void
+fts_drop_aux_table_from_vector(
+ trx_t* trx,
+ ib_vector_t* tables)
+{
+ for (ulint count = 0; count < ib_vector_size(tables);
+ ++count) {
+ fts_aux_table_t* aux_drop_table;
+ aux_drop_table = static_cast<fts_aux_table_t*>(
+ ib_vector_get(tables, count));
+
+ /* Check for the validity of the parent table */
+ if (!fts_valid_parent_table(aux_drop_table)) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Parent table of FTS auxiliary table %s not "
+ "found.", aux_drop_table->name);
+ dberr_t err = fts_drop_table(trx, aux_drop_table->name);
+ if (err == DB_FAIL) {
+ char* path = fil_make_ibd_name(
+ aux_drop_table->name, false);
+ os_file_delete_if_exists(innodb_file_data_key,
+ path);
+ mem_free(path);
+ }
+ }
+ }
+}
/**********************************************************************//**
Check and drop all orphaned FTS auxiliary tables, those that don't have
@@ -6359,9 +6659,12 @@ fts_check_and_drop_orphaned_tables(
trx_t* trx, /*!< in: transaction */
ib_vector_t* tables) /*!< in: tables to check */
{
-#ifdef _WIN32
mem_heap_t* heap;
ib_vector_t* aux_tables_to_rename;
+ ib_vector_t* invalid_aux_tables;
+ ib_vector_t* valid_aux_tables;
+ ib_vector_t* drop_aux_tables;
+ ib_vector_t* obsolete_aux_tables;
ib_alloc_t* heap_alloc;
heap = mem_heap_create(1024);
@@ -6372,38 +6675,99 @@ fts_check_and_drop_orphaned_tables(
aux_tables_to_rename = ib_vector_create(heap_alloc,
sizeof(fts_aux_table_t), 128);
+ /* We store all fake auxiliary table and orphaned table here. */
+ invalid_aux_tables = ib_vector_create(heap_alloc,
+ sizeof(fts_aux_table_t), 128);
+
+ /* We store all valid aux tables. We use this to filter the
+ fake auxiliary table from invalid auxiliary tables. */
+ valid_aux_tables = ib_vector_create(heap_alloc,
+ sizeof(fts_aux_table_t), 128);
+
+ /* We store all auxiliary tables to be dropped. */
+ drop_aux_tables = ib_vector_create(heap_alloc,
+ sizeof(fts_aux_table_t), 128);
+
+ /* We store all obsolete auxiliary tables to be dropped. */
+ obsolete_aux_tables = ib_vector_create(heap_alloc,
+ sizeof(fts_aux_table_t), 128);
+
/* Sort by parent_id first, in case rename will fail */
ib_vector_sort(tables, fts_check_aux_table_parent_id_cmp);
-#endif /* _WIN32 */
for (ulint i = 0; i < ib_vector_size(tables); ++i) {
dict_table_t* parent_table;
fts_aux_table_t* aux_table;
bool drop = false;
-#ifdef _WIN32
dict_table_t* table;
fts_aux_table_t* next_aux_table = NULL;
ib_id_t orig_parent_id = 0;
+ ib_id_t orig_index_id = 0;
bool rename = false;
-#endif /* _WIN32 */
aux_table = static_cast<fts_aux_table_t*>(
ib_vector_get(tables, i));
-#ifdef _WIN32
table = dict_table_open_on_id(
aux_table->id, TRUE, DICT_TABLE_OP_NORMAL);
orig_parent_id = aux_table->parent_id;
+ orig_index_id = aux_table->index_id;
if (table == NULL || strcmp(table->name, aux_table->name)) {
- /* Skip these aux tables, which are common tables
- with wrong table ids */
- if (table) {
+
+ bool fake_aux = false;
+
+ if (table != NULL) {
dict_table_close(table, TRUE, FALSE);
}
- continue;
+ if (i + 1 < ib_vector_size(tables)) {
+ next_aux_table = static_cast<fts_aux_table_t*>(
+ ib_vector_get(tables, i + 1));
+ }
+
+			/* Determine whether the aux table is a fake fts
+			table or an orphan fts table. */
+ for (ulint count = 0;
+ count < ib_vector_size(valid_aux_tables);
+ count++) {
+ fts_aux_table_t* valid_aux;
+ valid_aux = static_cast<fts_aux_table_t*>(
+ ib_vector_get(valid_aux_tables, count));
+ if (strcmp(valid_aux->name,
+ aux_table->name) == 0) {
+ fake_aux = true;
+ break;
+ }
+ }
+ /* All aux tables of parent table, whose id is
+ last_parent_id, have been checked, try to rename
+ them if necessary. */
+ if ((next_aux_table == NULL
+ || orig_parent_id != next_aux_table->parent_id)
+ && (!ib_vector_is_empty(aux_tables_to_rename))) {
+
+ ulint parent_id = fts_fake_hex_to_dec(
+ aux_table->parent_id);
+
+ parent_table = dict_table_open_on_id(
+ parent_id, TRUE,
+ DICT_TABLE_OP_NORMAL);
+
+ fts_rename_aux_tables_to_hex_format(
+ aux_tables_to_rename, parent_table);
+
+ dict_table_close(parent_table, TRUE,
+ FALSE);
+ }
+
+			/* If the aux table is a fake aux table, skip it. */
+ if (!fake_aux) {
+ ib_vector_push(invalid_aux_tables, aux_table);
+ }
+
+ continue;
} else if (!DICT_TF2_FLAG_IS_SET(table,
DICT_TF2_FTS_AUX_HEX_NAME)) {
@@ -6416,65 +6780,99 @@ fts_check_and_drop_orphaned_tables(
}
ut_ad(aux_table->id > aux_table->parent_id);
- rename = true;
- }
- if (table) {
- dict_table_close(table, TRUE, FALSE);
- }
-#endif /* _WIN32 */
+ /* Check whether parent table id and index id
+		are stored in decimal format. */
+ if (fts_valid_parent_table(aux_table)) {
- parent_table = dict_table_open_on_id(
- aux_table->parent_id, TRUE, DICT_TABLE_OP_NORMAL);
+ parent_table = dict_table_open_on_id(
+ aux_table->parent_id, true,
+ DICT_TABLE_OP_NORMAL);
- if (parent_table == NULL || parent_table->fts == NULL) {
+ ut_ad(parent_table != NULL);
+ ut_ad(parent_table->fts != NULL);
- drop = true;
-
- } else if (aux_table->index_id != 0) {
- index_id_t id;
- fts_t* fts;
+ if (!DICT_TF2_FLAG_IS_SET(
+ parent_table,
+ DICT_TF2_FTS_AUX_HEX_NAME)) {
+ rename = true;
+ }
- drop = true;
- fts = parent_table->fts;
- id = aux_table->index_id;
+ dict_table_close(parent_table, TRUE, FALSE);
+ }
- /* Search for the FT index in the table's list. */
- for (ulint j = 0;
- j < ib_vector_size(fts->indexes);
- ++j) {
+ if (!rename) {
+ /* Reassign the original value of
+ aux table if it is not in decimal format */
+ aux_table->parent_id = orig_parent_id;
+ aux_table->index_id = orig_index_id;
+ }
+ }
- const dict_index_t* index;
+ if (table != NULL) {
+ dict_table_close(table, true, false);
+ }
- index = static_cast<const dict_index_t*>(
- ib_vector_getp_const(fts->indexes, j));
+ if (!rename) {
+ /* Check the validity of the parent table. */
+ if (!fts_valid_parent_table(aux_table)) {
+ drop = true;
+ }
+ }
- if (index->id == id) {
- drop = false;
- break;
- }
+ /* Filter out the fake aux table by comparing with the
+		current valid auxiliary table name. */
+ for (ulint count = 0;
+ count < ib_vector_size(invalid_aux_tables); count++) {
+ fts_aux_table_t* invalid_aux;
+ invalid_aux = static_cast<fts_aux_table_t*>(
+ ib_vector_get(invalid_aux_tables, count));
+ if (strcmp(invalid_aux->name, aux_table->name) == 0) {
+ ib_vector_remove(
+ invalid_aux_tables,
+ *reinterpret_cast<void**>(invalid_aux));
+ break;
}
}
- if (drop) {
+ ib_vector_push(valid_aux_tables, aux_table);
- ib_logf(IB_LOG_LEVEL_WARN,
- "Parent table of FTS auxiliary table %s not "
- "found.", aux_table->name);
+ /* If the index associated with aux table is corrupted,
+ skip it. */
+ if (fts_check_corrupt_index(aux_table) > 0) {
- dberr_t err = fts_drop_table(trx, aux_table->name);
+ if (i + 1 < ib_vector_size(tables)) {
+ next_aux_table = static_cast<fts_aux_table_t*>(
+ ib_vector_get(tables, i + 1));
+ }
- if (err == DB_FAIL) {
- char* path;
+ if (next_aux_table == NULL
+ || orig_parent_id != next_aux_table->parent_id) {
- path = fil_make_ibd_name(
- aux_table->name, false);
+ parent_table = dict_table_open_on_id(
+ aux_table->parent_id, TRUE,
+ DICT_TABLE_OP_NORMAL);
- os_file_delete_if_exists(innodb_file_data_key,
- path);
+ if (!ib_vector_is_empty(aux_tables_to_rename)) {
+ fts_rename_aux_tables_to_hex_format(
+ aux_tables_to_rename, parent_table);
- mem_free(path);
+ } else {
+ fts_set_parent_hex_format_flag(
+ parent_table, trx);
+ }
+
+ dict_table_close(parent_table, TRUE, FALSE);
}
+
+ continue;
+ }
+
+ parent_table = dict_table_open_on_id(
+ aux_table->parent_id, TRUE, DICT_TABLE_OP_NORMAL);
+
+ if (drop) {
+ ib_vector_push(drop_aux_tables, aux_table);
} else {
if (FTS_IS_OBSOLETE_AUX_TABLE(aux_table->name)) {
@@ -6484,49 +6882,13 @@ fts_check_and_drop_orphaned_tables(
This could happen when we try to upgrade
from older server to later one, which doesn't
contain these obsolete tables. */
- drop = true;
-
- dberr_t err;
- trx_t* trx_drop =
- trx_allocate_for_background();
-
- trx_drop->op_info = "Drop obsolete aux tables";
- trx_drop->dict_operation_lock_mode = RW_X_LATCH;
-
- trx_start_for_ddl(trx_drop, TRX_DICT_OP_TABLE);
-
- err = row_drop_table_for_mysql(
- aux_table->name, trx_drop, false, true);
-
- trx_drop->dict_operation_lock_mode = 0;
-
- if (err != DB_SUCCESS) {
- /* We don't need to worry about the
- failure, since server would try to
- drop it on next restart, even if
- the table was broken. */
-
- ib_logf(IB_LOG_LEVEL_WARN,
- "Fail to drop obsolete aux"
- " table '%s', which is"
- " harmless. will try to drop"
- " it on next restart.",
- aux_table->name);
-
- fts_sql_rollback(trx_drop);
- } else {
- ib_logf(IB_LOG_LEVEL_INFO,
- "Dropped obsolete aux"
- " table '%s'.",
- aux_table->name);
-
- fts_sql_commit(trx_drop);
- }
-
- trx_free_for_background(trx_drop);
+ ib_vector_push(obsolete_aux_tables, aux_table);
+ continue;
}
}
-#ifdef _WIN32
+
+ /* If the aux table is in decimal format, we should
+ rename it, so push it to aux_tables_to_rename */
if (!drop && rename) {
ib_vector_push(aux_tables_to_rename, aux_table);
}
@@ -6544,38 +6906,16 @@ fts_check_and_drop_orphaned_tables(
them if necessary. We had better use a new background
trx to rename rather than the original trx, in case
any failure would cause a complete rollback. */
- dberr_t err;
- trx_t* trx_rename = trx_allocate_for_background();
- trx_rename->op_info = "Rename aux tables to "
- "hex format";
- trx_rename->dict_operation_lock_mode = RW_X_LATCH;
- trx_start_for_ddl(trx_rename, TRX_DICT_OP_TABLE);
+ ut_ad(rename);
+ ut_ad(!DICT_TF2_FLAG_IS_SET(
+ parent_table, DICT_TF2_FTS_AUX_HEX_NAME));
- err = fts_rename_aux_tables_to_hex_format(trx_rename,
- parent_table, aux_tables_to_rename);
-
- trx_rename->dict_operation_lock_mode = 0;
-
- if (err != DB_SUCCESS) {
- ib_logf(IB_LOG_LEVEL_WARN,
- "Rollback operations on all "
- "aux tables of table %s. "
- "Please check why renaming aux tables "
- "failed, and restart the server to "
- "upgrade again to "
- "get the table work.",
- parent_table->name);
-
- fts_sql_rollback(trx_rename);
- } else {
- fts_sql_commit(trx_rename);
- }
-
- trx_free_for_background(trx_rename);
- ib_vector_reset(aux_tables_to_rename);
+ fts_rename_aux_tables_to_hex_format(
+				aux_tables_to_rename, parent_table);
}
-#else /* _WIN32 */
- if (!drop) {
+
+ /* The IDs are already in correct hex format. */
+ if (!drop && !rename) {
dict_table_t* table;
table = dict_table_open_on_id(
@@ -6590,6 +6930,16 @@ fts_check_and_drop_orphaned_tables(
&& !DICT_TF2_FLAG_IS_SET(
table,
DICT_TF2_FTS_AUX_HEX_NAME)) {
+
+ DBUG_EXECUTE_IF("aux_table_flag_fail",
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Setting aux table %s to hex "
+ "format failed.", table->name);
+ fts_set_index_corrupt(
+ trx, aux_table->index_id,
+ parent_table);
+ goto table_exit;);
+
dberr_t err = fts_update_hex_format_flag(
trx, table->id, true);
@@ -6597,49 +6947,44 @@ fts_check_and_drop_orphaned_tables(
ib_logf(IB_LOG_LEVEL_WARN,
"Setting aux table %s to hex "
"format failed.", table->name);
+
+ fts_set_index_corrupt(
+ trx, aux_table->index_id,
+ parent_table);
} else {
DICT_TF2_FLAG_SET(table,
DICT_TF2_FTS_AUX_HEX_NAME);
}
}
+#ifndef DBUG_OFF
+table_exit:
+#endif /* !DBUG_OFF */
if (table != NULL) {
dict_table_close(table, TRUE, FALSE);
}
ut_ad(parent_table != NULL);
- if (!DICT_TF2_FLAG_IS_SET(parent_table,
- DICT_TF2_FTS_AUX_HEX_NAME)) {
- dberr_t err = fts_update_hex_format_flag(
- trx, parent_table->id, true);
- if (err != DB_SUCCESS) {
- ib_logf(IB_LOG_LEVEL_WARN,
- "Setting parent table %s of "
- "FTS auxiliary %s to hex "
- "format failed.",
- parent_table->name,
- aux_table->name);
- } else {
- DICT_TF2_FLAG_SET(parent_table,
- DICT_TF2_FTS_AUX_HEX_NAME);
- }
- }
+ fts_set_parent_hex_format_flag(
+ parent_table, trx);
}
-#endif /* _WIN32 */
-
- if (parent_table) {
+ if (parent_table != NULL) {
dict_table_close(parent_table, TRUE, FALSE);
}
}
-#ifdef _WIN32
+ fts_drop_aux_table_from_vector(trx, invalid_aux_tables);
+ fts_drop_aux_table_from_vector(trx, drop_aux_tables);
+ fts_sql_commit(trx);
+
+ fts_drop_obsolete_aux_table_from_vector(obsolete_aux_tables);
+
/* Free the memory allocated at the beginning */
if (heap != NULL) {
mem_heap_free(heap);
}
-#endif /* _WIN32 */
}
/**********************************************************************//**
@@ -6738,7 +7083,6 @@ fts_drop_orphaned_tables(void)
if (error == DB_SUCCESS) {
fts_check_and_drop_orphaned_tables(trx, tables);
- fts_sql_commit(trx);
break; /* Exit the loop. */
} else {
ib_vector_reset(tables);
diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc
index 910a00cd521..2e2bd061d07 100644
--- a/storage/xtradb/fts/fts0opt.cc
+++ b/storage/xtradb/fts/fts0opt.cc
@@ -190,6 +190,8 @@ cycle for a table. */
struct fts_slot_t {
dict_table_t* table; /*!< Table to optimize */
+ table_id_t table_id; /*!< Table id */
+
fts_state_t state; /*!< State of this slot */
ulint added; /*!< Number of doc ids added since the
@@ -2575,6 +2577,8 @@ fts_optimize_add_table(
return;
}
+ ut_ad(table->cached && table->fts != NULL);
+
/* Make sure table with FTS index cannot be evicted */
if (table->can_be_evicted) {
dict_table_move_from_lru_to_non_lru(table);
@@ -2741,6 +2745,7 @@ fts_optimize_new_table(
memset(slot, 0x0, sizeof(*slot));
slot->table = table;
+ slot->table_id = table->id;
slot->state = FTS_STATE_LOADED;
slot->interval_time = FTS_OPTIMIZE_INTERVAL_IN_SECS;
@@ -2865,7 +2870,8 @@ fts_is_sync_needed(
slot = static_cast<const fts_slot_t*>(
ib_vector_get_const(tables, i));
- if (slot->table && slot->table->fts) {
+ if (slot->state != FTS_STATE_EMPTY && slot->table
+ && slot->table->fts) {
total_memory += slot->table->fts->cache->total_size;
}
@@ -2948,6 +2954,7 @@ fts_optimize_thread(
ib_wqueue_t* wq = (ib_wqueue_t*) arg;
ut_ad(!srv_read_only_mode);
+ my_thread_init();
heap = mem_heap_create(sizeof(dict_table_t*) * 64);
heap_alloc = ib_heap_allocator_create(heap);
@@ -3076,9 +3083,11 @@ fts_optimize_thread(
if (slot->state != FTS_STATE_EMPTY) {
dict_table_t* table = NULL;
- table = dict_table_open_on_name(
- slot->table->name, FALSE, FALSE,
- DICT_ERR_IGNORE_INDEX_ROOT);
+			/* slot->table may be freed, so we try to open
+			the table by slot->table_id. */
+ table = dict_table_open_on_id(
+ slot->table_id, FALSE,
+ DICT_TABLE_OP_NORMAL);
if (table) {
@@ -3101,6 +3110,7 @@ fts_optimize_thread(
ib_logf(IB_LOG_LEVEL_INFO, "FTS optimize thread exiting.");
os_event_set(exit_event);
+ my_thread_end();
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index a91dbddb007..b697707446e 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -3332,7 +3332,8 @@ innobase_init(
innobase_hton->flush_logs = innobase_flush_logs;
innobase_hton->show_status = innobase_show_status;
- innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS;
+ innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS |
+ HTON_SUPPORTS_FOREIGN_KEYS;
innobase_hton->release_temporary_latches =
innobase_release_temporary_latches;
@@ -14212,6 +14213,7 @@ ha_innobase::start_stmt(
thr_lock_type lock_type)
{
trx_t* trx;
+ DBUG_ENTER("ha_innobase::start_stmt");
update_thd(thd);
@@ -14235,6 +14237,29 @@ ha_innobase::start_stmt(
prebuilt->hint_need_to_fetch_extra_cols = 0;
reset_template();
+ if (dict_table_is_temporary(prebuilt->table)
+ && prebuilt->mysql_has_locked
+ && prebuilt->select_lock_type == LOCK_NONE) {
+ dberr_t error;
+
+ switch (thd_sql_command(thd)) {
+ case SQLCOM_INSERT:
+ case SQLCOM_UPDATE:
+ case SQLCOM_DELETE:
+ init_table_handle_for_HANDLER();
+ prebuilt->select_lock_type = LOCK_X;
+ prebuilt->stored_select_lock_type = LOCK_X;
+ error = row_lock_table_for_mysql(prebuilt, NULL, 1);
+
+ if (error != DB_SUCCESS) {
+ int st = convert_error_code_to_mysql(
+ error, 0, thd);
+ DBUG_RETURN(st);
+ }
+ break;
+ }
+ }
+
if (!prebuilt->mysql_has_locked) {
/* This handle is for a temporary table created inside
this same LOCK TABLES; since MySQL does NOT call external_lock
@@ -14272,7 +14297,7 @@ ha_innobase::start_stmt(
++trx->will_lock;
}
- return(0);
+ DBUG_RETURN(0);
}
/******************************************************************//**
@@ -20371,3 +20396,27 @@ bool ha_innobase::is_thd_killed()
return thd_kill_level(user_thd);
}
+/**********************************************************************
+Issue a warning that the row is too big. */
+void
+ib_warn_row_too_big(const dict_table_t* table)
+{
+ /* If prefix is true then a 768-byte prefix is stored
+ locally for BLOB fields. Refer to dict_table_get_format() */
+ const bool prefix = (dict_tf_get_format(table->flags)
+ == UNIV_FORMAT_A);
+
+ const ulint free_space = page_get_free_space_of_empty(
+ table->flags & DICT_TF_COMPACT) / 2;
+
+ THD* thd = current_thd;
+
+ push_warning_printf(
+ thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TO_BIG_ROW,
+ "Row size too large (> %lu). Changing some columns to TEXT"
+ " or BLOB %smay help. In current row format, BLOB prefix of"
+ " %d bytes is stored inline.", free_space
+ , prefix ? "or using ROW_FORMAT=DYNAMIC or"
+ " ROW_FORMAT=COMPRESSED ": ""
+ , prefix ? DICT_MAX_FIXED_COL_LEN : 0);
+}
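ib_warn_row_too_big() above derives the limit from half of the free space of an empty page and mentions the inline 768-byte BLOB prefix only for the older (UNIV_FORMAT_A) formats. A small self-contained sketch of how that message is assembled, with illustrative numbers standing in for page_get_free_space_of_empty()/2 and DICT_MAX_FIXED_COL_LEN:

#include <cstdio>

// Illustrative numbers only; in InnoDB free_space comes from
// page_get_free_space_of_empty(...) / 2 and the prefix size from
// DICT_MAX_FIXED_COL_LEN.
int main()
{
    const unsigned long free_space = 8126;  /* roughly half of an empty 16K page */
    const bool prefix = true;               /* Antelope: 768-byte BLOB prefix inline */

    std::printf(
        "Row size too large (> %lu). Changing some columns to TEXT"
        " or BLOB %smay help. In current row format, BLOB prefix of"
        " %d bytes is stored inline.\n",
        free_space,
        prefix ? "or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED " : "",
        prefix ? 768 : 0);
}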
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index 34bf5f7ea6e..7e87332fe37 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -3378,9 +3378,7 @@ ha_innobase::prepare_inplace_alter_table(
ulint fts_doc_col_no = ULINT_UNDEFINED;
bool add_fts_doc_id = false;
bool add_fts_doc_id_idx = false;
-#ifdef _WIN32
bool add_fts_idx = false;
-#endif /* _WIN32 */
DBUG_ENTER("prepare_inplace_alter_table");
DBUG_ASSERT(!ha_alter_info->handler_ctx);
@@ -3529,9 +3527,7 @@ check_if_ok_to_rename:
& ~(HA_FULLTEXT
| HA_PACK_KEY
| HA_BINARY_PACK_KEY)));
-#ifdef _WIN32
add_fts_idx = true;
-#endif /* _WIN32 */
continue;
}
@@ -3542,19 +3538,16 @@ check_if_ok_to_rename:
}
}
-#ifdef _WIN32
/* We won't be allowed to add fts index to a table with
fts indexes already but without AUX_HEX_NAME set.
This means the aux tables of the table failed to
rename to hex format but new created aux tables
- shall be in hex format, which is contradictory.
- It's only for Windows. */
+ shall be in hex format, which is contradictory. */
if (!DICT_TF2_FLAG_IS_SET(indexed_table, DICT_TF2_FTS_AUX_HEX_NAME)
&& indexed_table->fts != NULL && add_fts_idx) {
my_error(ER_INNODB_FT_AUX_NOT_HEX_ID, MYF(0));
goto err_exit_no_heap;
}
-#endif /* _WIN32 */
/* Check existing index definitions for too-long column
prefixes as well, in case max_col_len shrunk. */
diff --git a/storage/xtradb/ibuf/ibuf0ibuf.cc b/storage/xtradb/ibuf/ibuf0ibuf.cc
index 3969d284a97..ef6c9c74558 100644
--- a/storage/xtradb/ibuf/ibuf0ibuf.cc
+++ b/storage/xtradb/ibuf/ibuf0ibuf.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -4660,7 +4660,7 @@ ibuf_merge_or_delete_for_page(
function. When the counter is > 0, that prevents tablespace
from being dropped. */
- tablespace_being_deleted = fil_inc_pending_ops(space);
+ tablespace_being_deleted = fil_inc_pending_ops(space, true);
if (UNIV_UNLIKELY(tablespace_being_deleted)) {
/* Do not try to read the bitmap page from space;
diff --git a/storage/xtradb/include/btr0cur.ic b/storage/xtradb/include/btr0cur.ic
index 080866c7465..43ee3304c0e 100644
--- a/storage/xtradb/include/btr0cur.ic
+++ b/storage/xtradb/include/btr0cur.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -28,7 +28,7 @@ Created 10/16/1994 Heikki Tuuri
#ifdef UNIV_DEBUG
# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)\
-if (btr_cur_limit_optimistic_insert_debug\
+if (btr_cur_limit_optimistic_insert_debug > 1\
&& (NREC) >= (ulint)btr_cur_limit_optimistic_insert_debug) {\
CODE;\
}
diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h
index ad10b8d1eb2..3d6a56f22a9 100644
--- a/storage/xtradb/include/buf0buf.h
+++ b/storage/xtradb/include/buf0buf.h
@@ -1367,7 +1367,10 @@ buf_pool_watch_is_sentinel(
const buf_page_t* bpage) /*!< in: block */
__attribute__((nonnull, warn_unused_result));
/****************************************************************//**
-Add watch for the given page to be read in. Caller must have the buffer pool
+Add watch for the given page to be read in. Caller must have
+appropriate hash_lock for the bpage and hold the LRU list mutex to avoid a race
+condition with buf_LRU_free_page inserting the same page into the page hash.
+This function may release the hash_lock and reacquire it.
@return NULL if watch set, block if the page is in the buffer pool */
UNIV_INTERN
buf_page_t*
diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h
index 136f7b52aba..88203595108 100644
--- a/storage/xtradb/include/dict0dict.h
+++ b/storage/xtradb/include/dict0dict.h
@@ -85,7 +85,7 @@ dict_get_referenced_table(
mem_heap_t* heap); /*!< in: heap memory */
/*********************************************************************//**
Frees a foreign key struct. */
-UNIV_INTERN
+
void
dict_foreign_free(
/*==============*/
diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h
index d6be9fae31c..7edf79043d3 100644
--- a/storage/xtradb/include/fil0fil.h
+++ b/storage/xtradb/include/fil0fil.h
@@ -586,7 +586,8 @@ UNIV_INTERN
ibool
fil_inc_pending_ops(
/*================*/
- ulint id); /*!< in: space id */
+ ulint id, /*!< in: space id */
+ ibool print_err); /*!< in: need to print error or not */
/*******************************************************************//**
Decrements the count of pending operations. */
UNIV_INTERN
diff --git a/storage/xtradb/include/fts0priv.ic b/storage/xtradb/include/fts0priv.ic
index 8ef877f267e..2d07c60f980 100644
--- a/storage/xtradb/include/fts0priv.ic
+++ b/storage/xtradb/include/fts0priv.ic
@@ -37,18 +37,38 @@ fts_write_object_id(
/* in: true for fixed hex format,
false for old ambiguous format */
{
+
#ifdef _WIN32
- /* Use this to construct old(5.6.14 and 5.7.3) ambiguous
- aux table names */
+
+ DBUG_EXECUTE_IF("innodb_test_wrong_non_windows_fts_aux_table_name",
+ return(sprintf(str, UINT64PFx, id)););
+
+ /* Use this to construct old (5.6.14 and 5.7.3) Windows
+ ambiguous aux table names */
DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name",
return(sprintf(str, "%016llu", id)););
+#else /* _WIN32 */
+
+ /* Use this to construct old (5.6.14 and 5.7.3) Windows
+ ambiguous aux table names */
+ DBUG_EXECUTE_IF("innodb_test_wrong_windows_fts_aux_table_name",
+ return(sprintf(str, "%016"PRIu64, id)););
+
+ DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name",
+ return(sprintf(str, UINT64PFx, id)););
+
+#endif /* _WIN32 */
+
/* As above, but this is only for those tables failing to rename. */
if (!hex_format) {
+#ifdef _WIN32
// FIXME: Use ut_snprintf(), so does following one.
return(sprintf(str, "%016llu", id));
- }
+#else /* _WIN32 */
+ return(sprintf(str, "%016"PRIu64, id));
#endif /* _WIN32 */
+ }
return(sprintf(str, UINT64PFx, id));
}
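fts_write_object_id() above switches between the old, ambiguous zero-padded decimal form and the new fixed-width hexadecimal form of the 64-bit id, using PRIu64 on non-Windows builds instead of %llu. The following standalone snippet shows the two renderings side by side (assuming UINT64PFx expands to a 16-digit, zero-padded hex conversion, as in InnoDB):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main()
{
    const std::uint64_t id = 1234;
    char dec[32];
    char hex[32];

    // Old, ambiguous aux-table form: zero-padded decimal
    // ("%016llu" on Windows, "%016" PRIu64 elsewhere).
    std::snprintf(dec, sizeof dec, "%016" PRIu64, id);

    // New form: fixed-width hexadecimal, roughly what UINT64PFx produces.
    std::snprintf(hex, sizeof hex, "%016" PRIx64, id);

    std::printf("decimal: %s\nhex:     %s\n", dec, hex);
}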
diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h
index 0c9f634266a..e70ae05f7c4 100644
--- a/storage/xtradb/include/os0file.h
+++ b/storage/xtradb/include/os0file.h
@@ -144,6 +144,7 @@ enum os_file_create_t {
/* @} */
/** Error codes from os_file_get_last_error @{ */
+#define OS_FILE_NAME_TOO_LONG 36
#define OS_FILE_NOT_FOUND 71
#define OS_FILE_DISK_FULL 72
#define OS_FILE_ALREADY_EXISTS 73
diff --git a/storage/xtradb/include/os0stacktrace.h b/storage/xtradb/include/os0stacktrace.h
index 58e9a528593..e79347c6189 100644
--- a/storage/xtradb/include/os0stacktrace.h
+++ b/storage/xtradb/include/os0stacktrace.h
@@ -20,7 +20,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#ifndef os0stacktrace_h
#define os0stacktrace_h
-#ifdef __linux__
+#if defined (__linux__) && HAVE_BACKTRACE && HAVE_BACKTRACE_SYMBOLS
#if HAVE_EXECINFO_H
#include <execinfo.h>
#endif
@@ -40,5 +40,5 @@ os_stacktrace_print(
siginfo_t* info, /*!< in: signal information */
void* ucontext);/*!< in: signal context */
-#endif /* __linux__ */
+#endif /* defined (__linux__) && HAVE_BACKTRACE && HAVE_BACKTRACE_SYMBOLS */
#endif /* os0stacktrace.h */
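The stacktrace header is now compiled only when both HAVE_BACKTRACE and HAVE_BACKTRACE_SYMBOLS are available. For reference, a minimal glibc-only example of those two calls (this is not os_stacktrace_print() itself, just the underlying API it relies on):

#include <execinfo.h>   /* backtrace(), backtrace_symbols(); glibc/Linux */
#include <cstdio>
#include <cstdlib>

// Capture and print the current call stack, the API the header guards
// with HAVE_BACKTRACE and HAVE_BACKTRACE_SYMBOLS.
static void print_stacktrace(void)
{
    void*  frames[64];
    int    n     = backtrace(frames, 64);
    char** names = backtrace_symbols(frames, n);

    if (names != NULL) {
        for (int i = 0; i < n; i++)
            std::printf("%s\n", names[i]);
        std::free(names);
    }
}

int main()
{
    print_stacktrace();
}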
diff --git a/storage/xtradb/include/trx0rec.h b/storage/xtradb/include/trx0rec.h
index 50da55d2ea3..96e7d595035 100644
--- a/storage/xtradb/include/trx0rec.h
+++ b/storage/xtradb/include/trx0rec.h
@@ -233,7 +233,7 @@ trx_undo_report_row_operation(
inserted undo log record,
0 if BTR_NO_UNDO_LOG
flag was specified */
- __attribute__((nonnull(3,4,10), warn_unused_result));
+ __attribute__((nonnull(4,10), warn_unused_result));
/******************************************************************//**
Copies an undo record to heap. This function can be called if we know that
the undo log record exists.
diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i
index dc677e4c50c..ec2c321e272 100644
--- a/storage/xtradb/include/univ.i
+++ b/storage/xtradb/include/univ.i
@@ -44,10 +44,10 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 21
+#define INNODB_VERSION_BUGFIX 22
#ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 70.0
+#define PERCONA_INNODB_VERSION 71.0
#endif
/* Enable UNIV_LOG_ARCHIVE in XtraDB */
@@ -121,6 +121,10 @@ if we are compiling on Windows. */
# include <sched.h>
# endif
+# ifdef HAVE_MALLOC_H
+# include <malloc.h>
+# endif
+
/* We only try to do explicit inlining of functions with gcc and
Sun Studio */
diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc
index f0c7305768c..dd572130016 100644
--- a/storage/xtradb/lock/lock0lock.cc
+++ b/storage/xtradb/lock/lock0lock.cc
@@ -5957,6 +5957,7 @@ loop:
ulint space = lock->un_member.rec_lock.space;
ulint zip_size= fil_space_get_zip_size(space);
ulint page_no = lock->un_member.rec_lock.page_no;
+ ibool tablespace_being_deleted = FALSE;
if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
@@ -5977,14 +5978,31 @@ loop:
if (srv_show_verbose_locks) {
- mtr_start(&mtr);
+ DEBUG_SYNC_C("innodb_monitor_before_lock_page_read");
- buf_page_get_gen(space, zip_size, page_no,
- RW_NO_LATCH, NULL,
- BUF_GET_POSSIBLY_FREED,
- __FILE__, __LINE__, &mtr);
+ /* Check if the space exists or not. Only
+ when the space is valid, try to get the page. */
+ tablespace_being_deleted
+ = fil_inc_pending_ops(space, false);
- mtr_commit(&mtr);
+ if (!tablespace_being_deleted) {
+ mtr_start(&mtr);
+
+ buf_page_get_gen(space, zip_size,
+ page_no, RW_NO_LATCH,
+ NULL,
+ BUF_GET_POSSIBLY_FREED,
+ __FILE__, __LINE__,
+ &mtr);
+
+ mtr_commit(&mtr);
+
+ fil_decr_pending_ops(space);
+ } else {
+ fprintf(file, "RECORD LOCKS on"
+ " non-existing space %lu\n",
+ (ulong) space);
+ }
}
load_page_first = FALSE;
@@ -6412,7 +6430,7 @@ lock_rec_block_validate(
/* Make sure that the tablespace is not deleted while we are
trying to access the page. */
- if (!fil_inc_pending_ops(space)) {
+ if (!fil_inc_pending_ops(space, true)) {
mtr_start(&mtr);
block = buf_page_get_gen(
space, fil_space_get_zip_size(space),
@@ -6515,6 +6533,7 @@ lock_rec_insert_check_and_lock(
ut_ad(!dict_index_is_online_ddl(index)
|| dict_index_is_clust(index)
|| (flags & BTR_CREATE_FLAG));
+ ut_ad((flags & BTR_NO_LOCKING_FLAG) || thr);
if (flags & BTR_NO_LOCKING_FLAG) {
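The monitor-printing path above now calls fil_inc_pending_ops(space, false) before reading a lock's page, so a tablespace that is being dropped is reported and skipped instead of being dereferenced. A simplified, self-contained sketch of that increment/work/decrement pattern (the real code serialises the check under the fil_system mutex; Space and the two helpers here are hypothetical):

#include <atomic>
#include <cstdio>

// Bump a per-tablespace counter before reading its pages so the space
// cannot be dropped underneath us; optionally stay silent when it is
// already being deleted.
struct Space {
    std::atomic<int>  pending_ops{0};
    std::atomic<bool> being_deleted{false};
};

static bool inc_pending_ops(Space& s, bool print_err)
{
    if (s.being_deleted.load()) {
        if (print_err)
            std::fprintf(stderr, "space is being deleted\n");
        return true;                 /* true: do not touch the space */
    }
    s.pending_ops.fetch_add(1);
    return false;
}

static void dec_pending_ops(Space& s)
{
    s.pending_ops.fetch_sub(1);
}

int main()
{
    Space space;
    if (!inc_pending_ops(space, false)) {
        /* ... read the lock's page while pending_ops protects the space ... */
        dec_pending_ops(space);
    }
}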
diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc
index 2dc129b1bb3..493f008c8b4 100644
--- a/storage/xtradb/log/log0log.cc
+++ b/storage/xtradb/log/log0log.cc
@@ -255,7 +255,7 @@ log_buffer_extend(
{
ulint move_start;
ulint move_end;
- byte *tmp_buf = (byte*)alloca(OS_FILE_LOG_BLOCK_SIZE);
+ byte* tmp_buf = static_cast<byte *>(alloca(OS_FILE_LOG_BLOCK_SIZE));
mutex_enter(&(log_sys->mutex));
@@ -3050,9 +3050,9 @@ log_archive_do(
ulint* n_bytes)/*!< out: archive log buffer size, 0 if nothing to
archive */
{
- ibool calc_new_limit;
- ib_uint64_t start_lsn;
- ib_uint64_t limit_lsn;
+ ibool calc_new_limit;
+ lsn_t start_lsn;
+ lsn_t limit_lsn = LSN_MAX;
calc_new_limit = TRUE;
loop:
@@ -3718,8 +3718,14 @@ loop:
lsn = log_sys->lsn;
- if (lsn != log_sys->last_checkpoint_lsn
- || (srv_track_changed_pages && (tracked_lsn != log_sys->last_checkpoint_lsn))
+ ut_ad(srv_force_recovery != SRV_FORCE_NO_LOG_REDO
+ || lsn == log_sys->last_checkpoint_lsn + LOG_BLOCK_HDR_SIZE);
+
+
+ if ((srv_force_recovery != SRV_FORCE_NO_LOG_REDO
+ && lsn != log_sys->last_checkpoint_lsn)
+ || (srv_track_changed_pages
+ && (tracked_lsn != log_sys->last_checkpoint_lsn))
#ifdef UNIV_LOG_ARCHIVE
|| (srv_log_archive_on
&& lsn != log_sys->archived_lsn + LOG_BLOCK_HDR_SIZE)
diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc
index 3e0ec3d15fa..f3c4887874d 100644
--- a/storage/xtradb/log/log0recv.cc
+++ b/storage/xtradb/log/log0recv.cc
@@ -3097,7 +3097,8 @@ recv_recovery_from_checkpoint_start_func(
#endif /* UNIV_LOG_ARCHIVE */
byte* buf;
byte* log_hdr_buf;
- byte* log_hdr_buf_base = (byte*)alloca(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);
+ byte* log_hdr_buf_base = static_cast<byte *>
+ (alloca(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE));
dberr_t err;
ut_when_dtor<recv_dblwr_t> tmp(recv_sys->dblwr);
diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc
index 2ddba0e0f6d..ad92cc396cc 100644
--- a/storage/xtradb/os/os0file.cc
+++ b/storage/xtradb/os/os0file.cc
@@ -571,6 +571,8 @@ os_file_get_last_error_low(
return(OS_FILE_OPERATION_ABORTED);
} else if (err == ERROR_ACCESS_DENIED) {
return(OS_FILE_ACCESS_VIOLATION);
+ } else if (err == ERROR_BUFFER_OVERFLOW) {
+ return(OS_FILE_NAME_TOO_LONG);
} else {
return(OS_FILE_ERROR_MAX + err);
}
@@ -632,6 +634,8 @@ os_file_get_last_error_low(
return(OS_FILE_NOT_FOUND);
case EEXIST:
return(OS_FILE_ALREADY_EXISTS);
+ case ENAMETOOLONG:
+ return(OS_FILE_NAME_TOO_LONG);
case EXDEV:
case ENOTDIR:
case EISDIR:
@@ -2179,8 +2183,6 @@ os_file_close_func(
#ifdef __WIN__
BOOL ret;
- ut_a(file);
-
ret = CloseHandle(file);
if (ret) {
@@ -2217,8 +2219,6 @@ os_file_close_no_error_handling(
#ifdef __WIN__
BOOL ret;
- ut_a(file);
-
ret = CloseHandle(file);
if (ret) {
@@ -2470,8 +2470,6 @@ os_file_flush_func(
#ifdef __WIN__
BOOL ret;
- ut_a(file);
-
os_n_fsyncs++;
ret = FlushFileBuffers(file);
@@ -2875,7 +2873,6 @@ os_file_read_func(
os_bytes_read_since_printout += n;
try_again:
- ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
@@ -2975,7 +2972,6 @@ os_file_read_no_error_handling_func(
os_bytes_read_since_printout += n;
try_again:
- ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
@@ -3081,7 +3077,6 @@ os_file_write_func(
os_n_file_writes++;
- ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
retry:
@@ -3243,7 +3238,7 @@ os_file_status(
struct _stat64 statinfo;
ret = _stat64(path, &statinfo);
- if (ret && (errno == ENOENT || errno == ENOTDIR)) {
+ if (ret && (errno == ENOENT || errno == ENOTDIR || errno == ENAMETOOLONG)) {
/* file does not exist */
*exists = FALSE;
return(TRUE);
@@ -3271,7 +3266,7 @@ os_file_status(
struct stat statinfo;
ret = stat(path, &statinfo);
- if (ret && (errno == ENOENT || errno == ENOTDIR)) {
+ if (ret && (errno == ENOENT || errno == ENOTDIR || errno == ENAMETOOLONG)) {
/* file does not exist */
*exists = FALSE;
return(TRUE);
@@ -4751,7 +4746,6 @@ os_aio_func(
#endif
ulint wake_later;
- ut_ad(file);
ut_ad(buf);
ut_ad(n > 0);
ut_ad(n % OS_MIN_LOG_BLOCK_SIZE == 0);
@@ -5621,12 +5615,10 @@ consecutive_loop:
aio_slot->offset, total_len);
}
- DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
- os_has_said_disk_full = FALSE;);
- DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
- ret = 0;);
- DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
- errno = 28;);
+ if (aio_slot->type == OS_FILE_WRITE) {
+ DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2",
+ os_has_said_disk_full = FALSE; ret = 0; errno = 28;);
+ }
srv_set_io_thread_op_info(global_segment, "file i/o done");
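The os0file changes introduce OS_FILE_NAME_TOO_LONG and map ENAMETOOLONG (and ERROR_BUFFER_OVERFLOW on Windows) onto it, so over-long paths fail cleanly instead of falling into the generic error branch. A tiny standalone illustration of that errno mapping (the enum values are stand-ins, not the real OS_FILE_* constants):

#include <cerrno>
#include <cstdio>

// Hypothetical stand-ins for the OS_FILE_* error codes; the point is only
// the extra ENAMETOOLONG case this patch introduces.
enum file_error {
    FILE_OK, FILE_NOT_FOUND, FILE_ALREADY_EXISTS, FILE_NAME_TOO_LONG, FILE_ERROR_OTHER
};

static file_error map_errno(int err)
{
    switch (err) {
    case 0:            return FILE_OK;
    case ENOENT:       return FILE_NOT_FOUND;
    case EEXIST:       return FILE_ALREADY_EXISTS;
    case ENAMETOOLONG: return FILE_NAME_TOO_LONG;   /* the newly handled case */
    default:           return FILE_ERROR_OTHER;
    }
}

int main()
{
    std::printf("%d\n", (int) map_errno(ENAMETOOLONG));
}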
diff --git a/storage/xtradb/os/os0thread.cc b/storage/xtradb/os/os0thread.cc
index 848c0ca4212..8420a94787b 100644
--- a/storage/xtradb/os/os0thread.cc
+++ b/storage/xtradb/os/os0thread.cc
@@ -138,6 +138,9 @@ os_thread_create_func(
os_thread_id_t* thread_id) /*!< out: id of the created
thread, or NULL */
{
+ /* make sure the new thread sees all changes made up to this point. */
+ os_wmb;
+
#ifdef __WIN__
os_thread_t thread;
DWORD win_thread_id;
@@ -156,10 +159,8 @@ os_thread_create_func(
if (thread_id) {
*thread_id = win_thread_id;
}
- if (thread) {
- CloseHandle(thread);
- }
- return((os_thread_t)win_thread_id);
+
+ return((os_thread_t)thread);
#else
int ret;
os_thread_t pthread;
diff --git a/storage/xtradb/page/page0zip.cc b/storage/xtradb/page/page0zip.cc
index cb97e666e75..535497480b7 100644
--- a/storage/xtradb/page/page0zip.cc
+++ b/storage/xtradb/page/page0zip.cc
@@ -4918,8 +4918,15 @@ page_zip_verify_checksum(
stored = static_cast<ib_uint32_t>(mach_read_from_4(
static_cast<const unsigned char*>(data) + FIL_PAGE_SPACE_OR_CHKSUM));
- /* declare empty pages non-corrupted */
- if (stored == 0) {
+#if FIL_PAGE_LSN % 8
+#error "FIL_PAGE_LSN must be 64 bit aligned"
+#endif
+
+ /* Check if page is empty */
+ if (stored == 0
+ && *reinterpret_cast<const ib_uint64_t*>(static_cast<const char*>(
+ data)
+ + FIL_PAGE_LSN) == 0) {
/* make sure that the page is really empty */
ulint i;
for (i = 0; i < size; i++) {
@@ -4927,7 +4934,7 @@ page_zip_verify_checksum(
return(FALSE);
}
}
-
+ /* Empty page */
return(TRUE);
}
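page_zip_verify_checksum() now treats a page as empty only when both the stored checksum field and the 8-byte FIL_PAGE_LSN field are zero, and still scans the whole page to confirm. A portable sketch of that check (the offsets 0 and 16 mirror the InnoDB page header layout; memcpy is used here instead of the in-place reinterpret_cast):

#include <cstdint>
#include <cstring>
#include <vector>

// "Is this page really empty?": look at two cheap fixed fields first
// (the stored checksum and the 8-byte LSN), then confirm by scanning
// the whole buffer.
static bool page_is_empty(const unsigned char* page, std::size_t size,
                          std::size_t chksum_ofs, std::size_t lsn_ofs)
{
    std::uint32_t stored;
    std::uint64_t lsn;
    std::memcpy(&stored, page + chksum_ofs, sizeof stored);
    std::memcpy(&lsn, page + lsn_ofs, sizeof lsn);

    if (stored != 0 || lsn != 0)
        return false;                       /* cheap early out */

    for (std::size_t i = 0; i < size; i++)  /* make sure it is really empty */
        if (page[i] != 0)
            return false;
    return true;
}

int main()
{
    std::vector<unsigned char> page(16384, 0);
    return page_is_empty(page.data(), page.size(), 0, 16) ? 0 : 1;
}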
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index fc3ec1ff6c3..854ebd6f8cc 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -3805,6 +3805,10 @@ row_drop_table_for_mysql(
pars_info_t* info = NULL;
mem_heap_t* heap = NULL;
+ DBUG_ENTER("row_drop_table_for_mysql");
+
+ DBUG_PRINT("row_drop_table_for_mysql", ("table: %s", name));
+
ut_a(name != NULL);
if (srv_created_new_raw) {
@@ -3814,7 +3818,7 @@ row_drop_table_for_mysql(
"InnoDB: Shut down mysqld and edit my.cnf so that newraw"
" is replaced with raw.\n", stderr);
- return(DB_ERROR);
+ DBUG_RETURN(DB_ERROR);
}
/* The table name is prefixed with the database name and a '/'.
@@ -4442,7 +4446,7 @@ funct_exit:
srv_wake_master_thread();
- return(err);
+ DBUG_RETURN(err);
}
/*********************************************************************//**
diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc
index 8054afaba2e..cfa00e68dee 100644
--- a/storage/xtradb/srv/srv0srv.cc
+++ b/storage/xtradb/srv/srv0srv.cc
@@ -1636,15 +1636,6 @@ srv_printf_innodb_monitor(
srv_n_system_rows_deleted_old = srv_stats.n_system_rows_deleted;
srv_n_system_rows_read_old = srv_stats.n_system_rows_read;
- /* Only if lock_print_info_summary proceeds correctly,
- before we call the lock_print_info_all_transactions
- to print all the lock information. */
- ret = lock_print_info_summary(file, nowait);
-
- if (ret) {
- lock_print_info_all_transactions(file);
- }
-
fputs("----------------------------\n"
"END OF INNODB MONITOR OUTPUT\n"
"============================\n", file);
diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc
index 003ad305309..cb6d648722b 100644
--- a/storage/xtradb/srv/srv0start.cc
+++ b/storage/xtradb/srv/srv0start.cc
@@ -134,6 +134,21 @@ static ulint n[SRV_MAX_N_IO_THREADS + 6];
static os_thread_id_t thread_ids[SRV_MAX_N_IO_THREADS + 6
+ SRV_MAX_N_PURGE_THREADS];
+/** Thread handles */
+static os_thread_t thread_handles[SRV_MAX_N_IO_THREADS + 6 + SRV_MAX_N_PURGE_THREADS];
+static os_thread_t buf_flush_page_cleaner_thread_handle;
+static os_thread_t buf_dump_thread_handle;
+static os_thread_t dict_stats_thread_handle;
+static os_thread_t buf_flush_lru_manager_thread_handle;
+static os_thread_t srv_redo_log_follow_thread_handle;
+/** Status variables: is the thread started? */
+static bool thread_started[SRV_MAX_N_IO_THREADS + 6 + SRV_MAX_N_PURGE_THREADS] = {false};
+static bool buf_flush_page_cleaner_thread_started = false;
+static bool buf_dump_thread_started = false;
+static bool dict_stats_thread_started = false;
+static bool buf_flush_lru_manager_thread_started = false;
+static bool srv_redo_log_follow_thread_started = false;
+
/** We use this mutex to test the return value of pthread_mutex_trylock
on successful locking. HP-UX does NOT return 0, though Linux et al do. */
static os_fast_mutex_t srv_os_test_mutex;
@@ -1532,8 +1547,9 @@ init_log_online(void)
/* Create the thread that follows the redo log to output the
changed page bitmap */
- os_thread_create(&srv_redo_log_follow_thread, NULL,
+ srv_redo_log_follow_thread_handle = os_thread_create(&srv_redo_log_follow_thread, NULL,
thread_ids + 5 + SRV_MAX_N_IO_THREADS);
+ srv_redo_log_follow_thread_started = true;
}
}
@@ -1550,8 +1566,8 @@ innobase_start_or_create_for_mysql(void)
lsn_t min_flushed_lsn;
lsn_t max_flushed_lsn;
#ifdef UNIV_LOG_ARCHIVE
- lsn_t min_arch_log_no;
- lsn_t max_arch_log_no;
+ lsn_t min_arch_log_no = LSN_MAX;
+ lsn_t max_arch_log_no = LSN_MAX;
#endif /* UNIV_LOG_ARCHIVE */
ulint sum_of_new_sizes;
ulint sum_of_data_file_sizes;
@@ -1633,7 +1649,7 @@ innobase_start_or_create_for_mysql(void)
stacktrace feature. */
if (srv_use_stacktrace) {
-#ifdef __linux__
+#if defined (__linux__) && HAVE_BACKTRACE && HAVE_BACKTRACE_SYMBOLS
struct sigaction sigact;
sigact.sa_sigaction = os_stacktrace_print;
@@ -1646,7 +1662,7 @@ innobase_start_or_create_for_mysql(void)
srv_use_stacktrace = FALSE;
}
-#endif /* __linux__ */
+#endif /* defined (__linux__) && HAVE_BACKTRACE && HAVE_BACKTRACE_SYMBOLS */
}
#ifdef UNIV_DEBUG
@@ -2059,7 +2075,8 @@ innobase_start_or_create_for_mysql(void)
n[i] = i;
- os_thread_create(io_handler_thread, n + i, thread_ids + i);
+ thread_handles[i] = os_thread_create(io_handler_thread, n + i, thread_ids + i);
+ thread_started[i] = true;
}
if (srv_n_log_files * srv_log_file_size * UNIV_PAGE_SIZE
@@ -2722,19 +2739,22 @@ files_checked:
if (!srv_read_only_mode) {
/* Create the thread which watches the timeouts
for lock waits */
- os_thread_create(
+ thread_handles[2 + SRV_MAX_N_IO_THREADS] = os_thread_create(
lock_wait_timeout_thread,
NULL, thread_ids + 2 + SRV_MAX_N_IO_THREADS);
+ thread_started[2 + SRV_MAX_N_IO_THREADS] = true;
/* Create the thread which warns of long semaphore waits */
- os_thread_create(
+ thread_handles[3 + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_error_monitor_thread,
NULL, thread_ids + 3 + SRV_MAX_N_IO_THREADS);
+ thread_started[3 + SRV_MAX_N_IO_THREADS] = true;
/* Create the thread which prints InnoDB monitor info */
- os_thread_create(
+ thread_handles[4 + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_monitor_thread,
NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS);
+ thread_started[4 + SRV_MAX_N_IO_THREADS] = true;
}
/* Create the SYS_FOREIGN and SYS_FOREIGN_COLS system tables */
@@ -2761,26 +2781,30 @@ files_checked:
if (!srv_read_only_mode) {
- os_thread_create(
+ thread_handles[1 + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_master_thread,
NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS));
+ thread_started[1 + SRV_MAX_N_IO_THREADS] = true;
}
if (!srv_read_only_mode
&& srv_force_recovery < SRV_FORCE_NO_BACKGROUND) {
- os_thread_create(
+ thread_handles[5 + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_purge_coordinator_thread,
NULL, thread_ids + 5 + SRV_MAX_N_IO_THREADS);
+ thread_started[5 + SRV_MAX_N_IO_THREADS] = true;
+
ut_a(UT_ARR_SIZE(thread_ids)
> 5 + srv_n_purge_threads + SRV_MAX_N_IO_THREADS);
/* We've already created the purge coordinator thread above. */
for (i = 1; i < srv_n_purge_threads; ++i) {
- os_thread_create(
+ thread_handles[5 + i + SRV_MAX_N_IO_THREADS] = os_thread_create(
srv_worker_thread, NULL,
thread_ids + 5 + i + SRV_MAX_N_IO_THREADS);
+ thread_started[5 + i + SRV_MAX_N_IO_THREADS] = true;
}
srv_start_wait_for_purge_to_start();
@@ -2790,9 +2814,12 @@ files_checked:
}
if (!srv_read_only_mode) {
- os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL);
+ buf_flush_page_cleaner_thread_handle = os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL);
+ buf_flush_page_cleaner_thread_started = true;
}
- os_thread_create(buf_flush_lru_manager_thread, NULL, NULL);
+
+ buf_flush_lru_manager_thread_handle = os_thread_create(buf_flush_lru_manager_thread, NULL, NULL);
+ buf_flush_lru_manager_thread_started = true;
#ifdef UNIV_DEBUG
/* buf_debug_prints = TRUE; */
@@ -2943,10 +2970,12 @@ files_checked:
if (!srv_read_only_mode) {
/* Create the buffer pool dump/load thread */
- os_thread_create(buf_dump_thread, NULL, NULL);
+ buf_dump_thread_handle = os_thread_create(buf_dump_thread, NULL, NULL);
+ buf_dump_thread_started = true;
/* Create the dict stats gathering thread */
- os_thread_create(dict_stats_thread, NULL, NULL);
+ dict_stats_thread_handle = os_thread_create(dict_stats_thread, NULL, NULL);
+ dict_stats_thread_started = true;
/* Create the thread that will optimize the FTS sub-system. */
fts_optimize_init();
@@ -3115,6 +3144,42 @@ innobase_shutdown_for_mysql(void)
dict_stats_thread_deinit();
}
+#ifdef __WIN__
+ /* MDEV-361: ha_innodb.dll leaks handles on Windows
+ MDEV-7403: should not pass recv_writer_thread_handle to
+ CloseHandle().
+
+ On Windows we should call CloseHandle() for all
+ open thread handles. */
+ if (os_thread_count == 0) {
+ for (i = 0; i < SRV_MAX_N_IO_THREADS + 6 + 32; ++i) {
+ if (thread_started[i]) {
+ CloseHandle(thread_handles[i]);
+ }
+ }
+
+ if (buf_flush_page_cleaner_thread_started) {
+ CloseHandle(buf_flush_page_cleaner_thread_handle);
+ }
+
+ if (buf_dump_thread_started) {
+ CloseHandle(buf_dump_thread_handle);
+ }
+
+ if (dict_stats_thread_started) {
+ CloseHandle(dict_stats_thread_handle);
+ }
+
+ if (buf_flush_lru_manager_thread_started) {
+ CloseHandle(buf_flush_lru_manager_thread_handle);
+ }
+
+ if (srv_redo_log_follow_thread_started) {
+ CloseHandle(srv_redo_log_follow_thread_handle);
+ }
+ }
+#endif /* __WIN__ */
+
/* This must be disabled before closing the buffer pool
and closing the data dictionary. */
btr_search_disable();
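The srv0start changes record every thread handle that was actually created and, on Windows, close each one during shutdown to fix the handle leak. A portable analogue of that bookkeeping using std::thread (the real code stores os_thread_t handles and calls CloseHandle() on them):

#include <thread>
#include <vector>

// Remember which threads were actually started and release every handle
// exactly once at shutdown instead of leaking them.
int main()
{
    std::vector<std::thread> started;

    for (int i = 0; i < 4; ++i)
        started.emplace_back([] { /* worker body */ });

    /* ... server runs ... */

    for (std::thread& t : started)   /* shutdown: release all handles we own */
        t.join();
}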
diff --git a/storage/xtradb/sync/sync0rw.cc b/storage/xtradb/sync/sync0rw.cc
index 7fad78ea577..3296e2e74a7 100644
--- a/storage/xtradb/sync/sync0rw.cc
+++ b/storage/xtradb/sync/sync0rw.cc
@@ -321,6 +321,7 @@ rw_lock_free_func(
ib_mutex_t* mutex;
#endif /* !INNODB_RW_LOCKS_USE_ATOMICS */
+ os_rmb;
ut_ad(rw_lock_validate(lock));
ut_a(lock->lock_word == X_LOCK_DECR);
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index f4a4c5fc0d8..0750a2392eb 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -526,12 +526,14 @@ mkdir debug
# Attempt to remove any optimisation flags from the debug build
CFLAGS=`echo " ${CFLAGS} " | \
sed -e 's/ -O[0-9]* / /' \
+ -e 's/-Wp,-D_FORTIFY_SOURCE=2/ /' \
-e 's/ -unroll2 / /' \
-e 's/ -ip / /' \
-e 's/^ //' \
-e 's/ $//'`
CXXFLAGS=`echo " ${CXXFLAGS} " | \
sed -e 's/ -O[0-9]* / /' \
+ -e 's/-Wp,-D_FORTIFY_SOURCE=2/ /' \
-e 's/ -unroll2 / /' \
-e 's/ -ip / /' \
-e 's/^ //' \
@@ -1321,7 +1323,7 @@ echo "=====" >> $STATUS_HISTORY
- Fix duplicate mentioning of "mysql_plugin" and its manual page,
it is better to keep alphabetic order in the files list (merging!).
-
+
* Wed Sep 14 2011 Joerg Bruehe <joerg.bruehe@oracle.com>
- Let the RPM capabilities ("obsoletes" etc) ensure that an upgrade may replace
@@ -1354,7 +1356,7 @@ echo "=====" >> $STATUS_HISTORY
* Fri Aug 19 2011 Joerg Bruehe <joerg.bruehe@oracle.com>
- Null-upmerge the fix of bug#37165: This spec file is not affected.
-- Replace "/var/lib/mysql" by the spec file variable "%{mysqldatadir}".
+- Replace "/var/lib/mysql" by the spec file variable "%%{mysqldatadir}".
* Fri Aug 12 2011 Daniel Fischer <daniel.fischer@oracle.com>
@@ -1374,13 +1376,13 @@ echo "=====" >> $STATUS_HISTORY
not in an RPM upgrade.
This affects both the "mkdir" and the call of "mysql_install_db".
-* Thu Feb 09 2011 Joerg Bruehe <joerg.bruehe@oracle.com>
+* Wed Feb 09 2011 Joerg Bruehe <joerg.bruehe@oracle.com>
- Fix bug#56581: If an installation deviates from the default file locations
("datadir" and "pid-file"), the mechanism to detect a running server (on upgrade)
should still work, and use these locations.
The problem was that the fix for bug#27072 did not check for local settings.
-
+
* Mon Jan 31 2011 Joerg Bruehe <joerg.bruehe@oracle.com>
- Install the new "manifest" files: "INFO_SRC" and "INFO_BIN".
@@ -1495,7 +1497,7 @@ echo "=====" >> $STATUS_HISTORY
- Fix some problems with the directives around "tcmalloc" (experimental),
remove erroneous traces of the InnoDB plugin (that is 5.1 only).
-* Fri Oct 06 2009 Magnus Blaudd <mvensson@mysql.com>
+* Tue Oct 06 2009 Magnus Blaudd <mvensson@mysql.com>
- Removed mysql_fix_privilege_tables
@@ -1613,7 +1615,7 @@ echo "=====" >> $STATUS_HISTORY
* Thu Nov 30 2006 Joerg Bruehe <joerg@mysql.com>
-- Call "make install" using "benchdir_root=%{_datadir}",
+- Call "make install" using "benchdir_root=%%{_datadir}",
because that is affecting the regression test suite as well.
* Thu Nov 16 2006 Joerg Bruehe <joerg@mysql.com>
@@ -1692,7 +1694,7 @@ echo "=====" >> $STATUS_HISTORY
- Set $LDFLAGS from $MYSQL_BUILD_LDFLAGS
-* Wed Mar 07 2006 Kent Boortz <kent@mysql.com>
+* Tue Mar 07 2006 Kent Boortz <kent@mysql.com>
- Changed product name from "Community Edition" to "Community Server"
@@ -1730,7 +1732,7 @@ echo "=====" >> $STATUS_HISTORY
- Added zlib to the list of (static) libraries installed
- Added check against libtool wierdness (WRT: sql/mysqld || sql/.libs/mysqld)
- Compile MySQL with bundled zlib
-- Fixed %packager name to "MySQL Production Engineering Team"
+- Fixed %%packager name to "MySQL Production Engineering Team"
* Mon Dec 05 2005 Joerg Bruehe <joerg@mysql.com>
@@ -1880,7 +1882,7 @@ echo "=====" >> $STATUS_HISTORY
- ISAM and merge storage engines were purged. As well as appropriate
tools and manpages (isamchk and isamlog)
-* Thu Dec 31 2004 Lenz Grimmer <lenz@mysql.com>
+* Fri Dec 31 2004 Lenz Grimmer <lenz@mysql.com>
- enabled the "Archive" storage engine for the max binary
- enabled the "CSV" storage engine for the max binary
@@ -1940,7 +1942,7 @@ echo "=====" >> $STATUS_HISTORY
- marked /etc/logrotate.d/mysql as a config file (BUG 2156)
-* Fri Dec 13 2003 Lenz Grimmer <lenz@mysql.com>
+* Sat Dec 13 2003 Lenz Grimmer <lenz@mysql.com>
- fixed file permissions (BUG 1672)
@@ -2082,7 +2084,7 @@ echo "=====" >> $STATUS_HISTORY
- Added separate libmysql_r directory; now both a threaded
and non-threaded library is shipped.
-* Wed Sep 28 1999 David Axmark <davida@mysql.com>
+* Tue Sep 28 1999 David Axmark <davida@mysql.com>
- Added the support-files/my-example.cnf to the docs directory.
diff --git a/support-files/rpm/server-postin.sh b/support-files/rpm/server-postin.sh
index 91885f73466..cd2aec4d84a 100644
--- a/support-files/rpm/server-postin.sh
+++ b/support-files/rpm/server-postin.sh
@@ -1,6 +1,10 @@
# Make MySQL start/shutdown automatically when the machine does it.
if [ $1 = 1 ] ; then
+ if [ -x /usr/bin/systemctl ] ; then
+ /usr/bin/systemctl daemon-reload >/dev/null 2>&1
+ fi
+
if [ -x /sbin/chkconfig ] ; then
/sbin/chkconfig --add mysql
fi
@@ -51,30 +55,31 @@ fi
SETARGETDIR=/etc/selinux/targeted/src/policy
SEDOMPROG=$SETARGETDIR/domains/program
SECONPROG=$SETARGETDIR/file_contexts/program
-if [ -f /etc/redhat-release ] \
- && grep -q "Red Hat Enterprise Linux .. release 4" /etc/redhat-release \
- || grep -q "CentOS release 4" /etc/redhat-release ; then
- echo
- echo
- echo 'Notes regarding SELinux on this platform:'
- echo '========================================='
- echo
- echo 'The default policy might cause server startup to fail because it is '
- echo 'not allowed to access critical files. In this case, please update '
- echo 'your installation. '
- echo
- echo 'The default policy might also cause inavailability of SSL related '
- echo 'features because the server is not allowed to access /dev/random '
- echo 'and /dev/urandom. If this is a problem, please do the following: '
- echo
- echo ' 1) install selinux-policy-targeted-sources from your OS vendor'
- echo ' 2) add the following two lines to '$SEDOMPROG/mysqld.te':'
- echo ' allow mysqld_t random_device_t:chr_file read;'
- echo ' allow mysqld_t urandom_device_t:chr_file read;'
- echo ' 3) cd to '$SETARGETDIR' and issue the following command:'
- echo ' make load'
- echo
- echo
+if [ -f /etc/redhat-release ] ; then
+ if grep '\(Red Hat Enterprise Linux ..\|CentOS\) release 4' \
+ /etc/redhat-release >/dev/null 2>&1; then
+ echo
+ echo
+ echo 'Notes regarding SELinux on this platform:'
+ echo '========================================='
+ echo
+ echo 'The default policy might cause server startup to fail because it is '
+ echo 'not allowed to access critical files. In this case, please update '
+ echo 'your installation. '
+ echo
+ echo 'The default policy might also cause unavailability of SSL related '
+ echo 'features because the server is not allowed to access /dev/random '
+ echo 'and /dev/urandom. If this is a problem, please do the following: '
+ echo
+ echo ' 1) install selinux-policy-targeted-sources from your OS vendor'
+ echo ' 2) add the following two lines to '$SEDOMPROG/mysqld.te':'
+ echo ' allow mysqld_t random_device_t:chr_file read;'
+ echo ' allow mysqld_t urandom_device_t:chr_file read;'
+ echo ' 3) cd to '$SETARGETDIR' and issue the following command:'
+ echo ' make load'
+ echo
+ echo
+ fi
fi
if [ -x sbin/restorecon ] ; then
diff --git a/support-files/rpm/server-postun.sh b/support-files/rpm/server-postun.sh
index dcf67173a02..412c6f4c67b 100644
--- a/support-files/rpm/server-postun.sh
+++ b/support-files/rpm/server-postun.sh
@@ -6,3 +6,10 @@ if [ $1 -ge 1 ]; then
fi
fi
fi
+
+if [ $1 = 0 ] ; then
+ if [ -x /usr/bin/systemctl ] ; then
+ /usr/bin/systemctl daemon-reload > /dev/null 2>&1
+ fi
+fi
+
diff --git a/tests/fork_big.pl b/tests/fork_big.pl
index b866b899f8d..6e78e779d11 100755
--- a/tests/fork_big.pl
+++ b/tests/fork_big.pl
@@ -2,7 +2,7 @@
# Copyright (c) 2001, 2006 MySQL AB
# Use is subject to license terms
-
+#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
diff --git a/tests/fork_big2.pl b/tests/fork_big2.pl
index e92cf869e52..0f1d65be8f9 100644
--- a/tests/fork_big2.pl
+++ b/tests/fork_big2.pl
@@ -2,7 +2,7 @@
# Copyright (c) 2002, 2003, 2005, 2006 MySQL AB
# Use is subject to license terms
-
+#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
diff --git a/vio/viossl.c b/vio/viossl.c
index 2bee31f66ab..0bc2c263336 100644
--- a/vio/viossl.c
+++ b/vio/viossl.c
@@ -40,41 +40,6 @@
#define SSL_errno(X,Y) ERR_get_error()
#endif
-#ifndef DBUG_OFF
-
-static void
-report_errors(SSL* ssl)
-{
- unsigned long l;
- const char *file;
- const char *data;
- int line, flags;
- char buf[512];
-
- DBUG_ENTER("report_errors");
-
- while ((l= ERR_get_error_line_data(&file,&line,&data,&flags)))
- {
- DBUG_PRINT("error", ("OpenSSL: %s:%s:%d:%s\n", ERR_error_string(l,buf),
- file,line,(flags&ERR_TXT_STRING)?data:"")) ;
- }
-
- if (ssl)
- {
-#ifndef DBUG_OFF
- ulong error= SSL_errno(ssl, l);
- DBUG_PRINT("error", ("error: %s (%lu)",
- ERR_error_string(error, buf), error));
-#endif
- }
-
- DBUG_PRINT("info", ("socket_errno: %d", socket_errno));
- DBUG_VOID_RETURN;
-}
-
-#endif
-
-
/**
Obtain the equivalent system error status for the last SSL I/O operation.
@@ -157,9 +122,6 @@ static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event
*event= VIO_IO_EVENT_WRITE;
break;
default:
-#ifndef DBUG_OFF
- report_errors(ssl);
-#endif
should_retry= FALSE;
ssl_set_sys_error(ssl_error);
break;
@@ -195,10 +157,6 @@ size_t vio_ssl_read(Vio *vio, uchar *buf, size_t size)
}
}
-#ifndef DBUG_OFF
- if (ret < 0)
- report_errors((SSL*) vio->ssl_arg);
-#endif
DBUG_PRINT("exit", ("%d", (int) ret));
DBUG_RETURN(ret < 0 ? -1 : ret);
@@ -233,10 +191,6 @@ size_t vio_ssl_write(Vio *vio, const uchar *buf, size_t size)
}
}
-#ifndef DBUG_OFF
- if (ret < 0)
- report_errors((SSL*) vio->ssl_arg);
-#endif
DBUG_RETURN(ret < 0 ? -1 : ret);
}
diff --git a/vio/viosslfactories.c b/vio/viosslfactories.c
index 20bed2b6728..ee944c68f92 100644
--- a/vio/viosslfactories.c
+++ b/vio/viosslfactories.c
@@ -51,27 +51,6 @@ static DH *get_dh512(void)
}
-static void
-report_errors()
-{
- unsigned long l;
- const char* file;
- const char* data;
- int line,flags;
-
- DBUG_ENTER("report_errors");
-
- while ((l=ERR_get_error_line_data(&file,&line,&data,&flags)) != 0)
- {
-#ifndef DBUG_OFF /* Avoid warning */
- char buf[200];
- DBUG_PRINT("error", ("OpenSSL: %s:%s:%d:%s\n", ERR_error_string(l,buf),
- file,line,(flags & ERR_TXT_STRING) ? data : "")) ;
-#endif
- }
- DBUG_VOID_RETURN;
-}
-
static const char*
ssl_error_string[] =
{
@@ -198,7 +177,6 @@ new_VioSSLFd(const char *key_file, const char *cert_file,
{
*error= SSL_INITERR_MEMFAIL;
DBUG_PRINT("error", ("%s", sslGetErrString(*error)));
- report_errors();
my_free(ssl_fd);
DBUG_RETURN(0);
}
@@ -215,7 +193,6 @@ new_VioSSLFd(const char *key_file, const char *cert_file,
{
*error= SSL_INITERR_CIPHERS;
DBUG_PRINT("error", ("%s", sslGetErrString(*error)));
- report_errors();
SSL_CTX_free(ssl_fd->ssl_context);
my_free(ssl_fd);
DBUG_RETURN(0);
@@ -232,7 +209,6 @@ new_VioSSLFd(const char *key_file, const char *cert_file,
*error= SSL_INITERR_BAD_PATHS;
DBUG_PRINT("error", ("SSL_CTX_load_verify_locations failed : %s",
sslGetErrString(*error)));
- report_errors();
SSL_CTX_free(ssl_fd->ssl_context);
my_free(ssl_fd);
DBUG_RETURN(0);
@@ -243,7 +219,6 @@ new_VioSSLFd(const char *key_file, const char *cert_file,
{
*error= SSL_INITERR_BAD_PATHS;
DBUG_PRINT("error", ("%s", sslGetErrString(*error)));
- report_errors();
SSL_CTX_free(ssl_fd->ssl_context);
my_free(ssl_fd);
DBUG_RETURN(0);
@@ -266,7 +241,6 @@ new_VioSSLFd(const char *key_file, const char *cert_file,
DBUG_PRINT("warning", ("X509_STORE_load_locations for CRL failed"));
*error= SSL_INITERR_BAD_PATHS;
DBUG_PRINT("error", ("%s", sslGetErrString(*error)));
- report_errors();
SSL_CTX_free(ssl_fd->ssl_context);
my_free(ssl_fd);
DBUG_RETURN(0);
@@ -277,7 +251,6 @@ new_VioSSLFd(const char *key_file, const char *cert_file,
if (vio_set_cert_stuff(ssl_fd->ssl_context, cert_file, key_file, error))
{
DBUG_PRINT("error", ("vio_set_cert_stuff failed"));
- report_errors();
SSL_CTX_free(ssl_fd->ssl_context);
my_free(ssl_fd);
DBUG_RETURN(0);